Mirror of https://github.com/Z3Prover/z3
Synced 2025-04-22 16:45:31 +00:00
Commit 0eb2915e83
80 changed files with 19399 additions and 11 deletions
@ -14,6 +14,7 @@ set(Z3_API_HEADER_FILES_TO_SCAN
|
|||
z3_optimization.h
|
||||
z3_interp.h
|
||||
z3_fpa.h
|
||||
z3_spacer.h
|
||||
)
|
||||
set(Z3_FULL_PATH_API_HEADER_FILES_TO_SCAN "")
|
||||
foreach (header_file ${Z3_API_HEADER_FILES_TO_SCAN})
|
||||
|
@ -92,6 +93,7 @@ add_subdirectory(muz/tab)
|
|||
add_subdirectory(muz/bmc)
|
||||
add_subdirectory(muz/ddnf)
|
||||
add_subdirectory(muz/duality)
|
||||
add_subdirectory(muz/spacer)
|
||||
add_subdirectory(muz/fp)
|
||||
add_subdirectory(tactic/nlsat_smt)
|
||||
add_subdirectory(tactic/ufbv)
|
||||
|
@ -168,6 +170,7 @@ set (libz3_public_headers
|
|||
z3_polynomial.h
|
||||
z3_rcf.h
|
||||
z3_v1.h
|
||||
z3_spacer.h
|
||||
)
|
||||
foreach (header ${libz3_public_headers})
|
||||
set_property(TARGET libz3 APPEND PROPERTY
|
||||
|
|
|
@ -57,6 +57,7 @@ z3_add_component(api
|
|||
api_parsers.cpp
|
||||
api_pb.cpp
|
||||
api_polynomial.cpp
|
||||
api_qe.cpp
|
||||
api_quant.cpp
|
||||
api_rcf.cpp
|
||||
api_seq.cpp
|
||||
|
|
|
@ -605,5 +605,6 @@ extern "C" {
|
|||
|
||||
}
|
||||
|
||||
#include "api_datalog_spacer.inc"
|
||||
|
||||
};
|
||||
|
|
113
src/api/api_datalog_spacer.inc
Normal file
|
@ -0,0 +1,113 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
api_datalog_spacer.inc
|
||||
|
||||
Abstract:
|
||||
|
||||
Spacer-specific datalog API
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel (arie)
|
||||
|
||||
Notes:
|
||||
this file is included at the bottom of api_datalog.cpp
|
||||
|
||||
--*/
|
||||
Z3_lbool Z3_API Z3_fixedpoint_query_from_lvl (Z3_context c, Z3_fixedpoint d, Z3_ast q, unsigned lvl) {
|
||||
Z3_TRY;
|
||||
LOG_Z3_fixedpoint_query_from_lvl (c, d, q, lvl);
|
||||
RESET_ERROR_CODE();
|
||||
lbool r = l_undef;
|
||||
unsigned timeout = to_fixedpoint(d)->m_params.get_uint("timeout", mk_c(c)->get_timeout());
|
||||
unsigned rlimit = to_fixedpoint(d)->m_params.get_uint("rlimit", mk_c(c)->get_rlimit());
|
||||
{
|
||||
scoped_rlimit _rlimit(mk_c(c)->m().limit(), rlimit);
|
||||
cancel_eh<reslimit> eh(mk_c(c)->m().limit());
|
||||
api::context::set_interruptable si(*(mk_c(c)), eh);
|
||||
scoped_timer timer(timeout, &eh);
|
||||
try {
|
||||
r = to_fixedpoint_ref(d)->ctx().query_from_lvl (to_expr(q), lvl);
|
||||
}
|
||||
catch (z3_exception& ex) {
|
||||
mk_c(c)->handle_exception(ex);
|
||||
r = l_undef;
|
||||
}
|
||||
to_fixedpoint_ref(d)->ctx().cleanup();
|
||||
}
|
||||
return of_lbool(r);
|
||||
Z3_CATCH_RETURN(Z3_L_UNDEF);
|
||||
}
|
||||
|
||||
Z3_ast Z3_API Z3_fixedpoint_get_ground_sat_answer(Z3_context c, Z3_fixedpoint d) {
|
||||
Z3_TRY;
|
||||
LOG_Z3_fixedpoint_get_ground_sat_answer(c, d);
|
||||
RESET_ERROR_CODE();
|
||||
expr* e = to_fixedpoint_ref(d)->ctx().get_ground_sat_answer();
|
||||
mk_c(c)->save_ast_trail(e);
|
||||
RETURN_Z3(of_expr(e));
|
||||
Z3_CATCH_RETURN(0);
|
||||
}
|
||||
|
||||
Z3_ast_vector Z3_API Z3_fixedpoint_get_rules_along_trace(
|
||||
Z3_context c,
|
||||
Z3_fixedpoint d)
|
||||
{
|
||||
Z3_TRY;
|
||||
LOG_Z3_fixedpoint_get_rules_along_trace(c, d);
|
||||
ast_manager& m = mk_c(c)->m();
|
||||
Z3_ast_vector_ref* v = alloc(Z3_ast_vector_ref, *mk_c(c), m);
|
||||
mk_c(c)->save_object(v);
|
||||
expr_ref_vector rules(m);
|
||||
svector<symbol> names;
|
||||
|
||||
to_fixedpoint_ref(d)->ctx().get_rules_along_trace_as_formulas(rules, names);
|
||||
for (unsigned i = 0; i < rules.size(); ++i) {
|
||||
v->m_ast_vector.push_back(rules[i].get());
|
||||
}
|
||||
RETURN_Z3(of_ast_vector(v));
|
||||
Z3_CATCH_RETURN(0);
|
||||
}
|
||||
|
||||
Z3_symbol Z3_API Z3_fixedpoint_get_rule_names_along_trace(
|
||||
Z3_context c,
|
||||
Z3_fixedpoint d)
|
||||
{
|
||||
Z3_TRY;
|
||||
LOG_Z3_fixedpoint_get_rule_names_along_trace(c, d);
|
||||
ast_manager& m = mk_c(c)->m();
|
||||
Z3_ast_vector_ref* v = alloc(Z3_ast_vector_ref, *mk_c(c), m);
|
||||
mk_c(c)->save_object(v);
|
||||
expr_ref_vector rules(m);
|
||||
svector<symbol> names;
|
||||
std::stringstream ss;
|
||||
|
||||
to_fixedpoint_ref(d)->ctx().get_rules_along_trace_as_formulas(rules, names);
|
||||
for (unsigned i = 0; i < names.size(); ++i) {
|
||||
ss << ";" << names[i].str();
|
||||
}
|
||||
RETURN_Z3(of_symbol(symbol(ss.str().substr(1).c_str())));
|
||||
Z3_CATCH_RETURN(0);
|
||||
}
|
||||
|
||||
void Z3_API Z3_fixedpoint_add_invariant(Z3_context c, Z3_fixedpoint d, Z3_func_decl pred, Z3_ast property) {
|
||||
Z3_TRY;
|
||||
LOG_Z3_fixedpoint_add_invariant(c, d, pred, property);
|
||||
RESET_ERROR_CODE();
|
||||
to_fixedpoint_ref(d)->ctx ().add_invariant(to_func_decl(pred), to_expr(property));
|
||||
Z3_CATCH;
|
||||
}
|
||||
|
||||
Z3_ast Z3_API Z3_fixedpoint_get_reachable(Z3_context c, Z3_fixedpoint d, Z3_func_decl pred) {
|
||||
Z3_TRY;
|
||||
LOG_Z3_fixedpoint_get_reachable(c, d, pred);
|
||||
RESET_ERROR_CODE();
|
||||
expr_ref r = to_fixedpoint_ref(d)->ctx().get_reachable(to_func_decl(pred));
|
||||
mk_c(c)->save_ast_trail(r);
|
||||
RETURN_Z3(of_expr(r.get()));
|
||||
Z3_CATCH_RETURN(0);
|
||||
}
|
||||
|
179
src/api/api_qe.cpp
Normal file
|
@ -0,0 +1,179 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
api_qe.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Model-based Projection (MBP) and Quantifier Elimination (QE) API
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel (arie)
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
|
||||
#include <iostream>
|
||||
#include "z3.h"
|
||||
#include "api_log_macros.h"
|
||||
#include "api_context.h"
|
||||
#include "api_util.h"
|
||||
#include "api_model.h"
|
||||
#include "api_ast_map.h"
|
||||
#include "api_ast_vector.h"
|
||||
|
||||
#include "qe_vartest.h"
|
||||
#include "qe_lite.h"
|
||||
#include "spacer_util.h"
|
||||
|
||||
#include "expr_map.h"
|
||||
|
||||
extern "C"
|
||||
{
|
||||
Z3_ast Z3_API Z3_qe_model_project (Z3_context c,
|
||||
Z3_model m,
|
||||
unsigned num_bounds,
|
||||
Z3_app const bound[],
|
||||
Z3_ast body)
|
||||
{
|
||||
Z3_TRY;
|
||||
LOG_Z3_qe_model_project (c, m, num_bounds, bound, body);
|
||||
RESET_ERROR_CODE();
|
||||
|
||||
app_ref_vector vars(mk_c(c)->m ());
|
||||
for (unsigned i = 0; i < num_bounds; ++i)
|
||||
{
|
||||
app *a = to_app (bound [i]);
|
||||
if (a->get_kind () != AST_APP)
|
||||
{
|
||||
SET_ERROR_CODE (Z3_INVALID_ARG);
|
||||
RETURN_Z3(0);
|
||||
}
|
||||
vars.push_back (a);
|
||||
}
|
||||
|
||||
expr_ref result (mk_c(c)->m ());
|
||||
result = to_expr (body);
|
||||
model_ref model (to_model_ref (m));
|
||||
spacer::qe_project (mk_c(c)->m (), vars, result, model);
|
||||
mk_c(c)->save_ast_trail (result.get ());
|
||||
|
||||
return of_expr (result.get ());
|
||||
Z3_CATCH_RETURN(0);
|
||||
}
|
||||
|
||||
Z3_ast Z3_API Z3_qe_model_project_skolem (Z3_context c,
|
||||
Z3_model m,
|
||||
unsigned num_bounds,
|
||||
Z3_app const bound[],
|
||||
Z3_ast body,
|
||||
Z3_ast_map map)
|
||||
{
|
||||
Z3_TRY;
|
||||
LOG_Z3_qe_model_project_skolem (c, m, num_bounds, bound, body, map);
|
||||
RESET_ERROR_CODE();
|
||||
|
||||
ast_manager& man = mk_c(c)->m ();
|
||||
app_ref_vector vars(man);
|
||||
for (unsigned i = 0; i < num_bounds; ++i)
|
||||
{
|
||||
app *a = to_app (bound [i]);
|
||||
if (a->get_kind () != AST_APP)
|
||||
{
|
||||
SET_ERROR_CODE (Z3_INVALID_ARG);
|
||||
RETURN_Z3(0);
|
||||
}
|
||||
vars.push_back (a);
|
||||
}
|
||||
|
||||
expr_ref result (mk_c(c)->m ());
|
||||
result = to_expr (body);
|
||||
model_ref model (to_model_ref (m));
|
||||
expr_map emap (man);
|
||||
|
||||
spacer::qe_project (mk_c(c)->m (), vars, result, model, emap);
|
||||
mk_c(c)->save_ast_trail (result.get ());
|
||||
|
||||
obj_map<ast, ast*> &map_z3 = to_ast_map_ref(map);
|
||||
|
||||
for (expr_map::iterator it = emap.begin(), end = emap.end(); it != end; ++it){
|
||||
man.inc_ref(&(it->get_key()));
|
||||
man.inc_ref(it->get_value());
|
||||
map_z3.insert(&(it->get_key()), it->get_value());
|
||||
}
|
||||
|
||||
return of_expr (result.get ());
|
||||
Z3_CATCH_RETURN(0);
|
||||
}
|
||||
|
||||
Z3_ast Z3_API Z3_model_extrapolate (Z3_context c,
|
||||
Z3_model m,
|
||||
Z3_ast fml)
|
||||
{
|
||||
Z3_TRY;
|
||||
LOG_Z3_model_extrapolate (c, m, fml);
|
||||
RESET_ERROR_CODE();
|
||||
|
||||
model_ref model (to_model_ref (m));
|
||||
expr_ref_vector facts (mk_c(c)->m ());
|
||||
facts.push_back (to_expr (fml));
|
||||
flatten_and (facts);
|
||||
|
||||
spacer::model_evaluator_util mev (mk_c(c)->m());
|
||||
mev.set_model (*model);
|
||||
|
||||
expr_ref_vector lits (mk_c(c)->m());
|
||||
spacer::compute_implicant_literals (mev, facts, lits);
|
||||
|
||||
expr_ref result (mk_c(c)->m ());
|
||||
result = mk_and (lits);
|
||||
mk_c(c)->save_ast_trail (result.get ());
|
||||
|
||||
return of_expr (result.get ());
|
||||
Z3_CATCH_RETURN(0);
|
||||
}
|
||||
|
||||
Z3_ast Z3_API Z3_qe_lite (Z3_context c, Z3_ast_vector vars, Z3_ast body)
|
||||
{
|
||||
Z3_TRY;
|
||||
LOG_Z3_qe_lite (c, vars, body);
|
||||
RESET_ERROR_CODE();
|
||||
ast_ref_vector &vVars = to_ast_vector_ref (vars);
|
||||
|
||||
app_ref_vector vApps (mk_c(c)->m());
|
||||
for (unsigned i = 0; i < vVars.size (); ++i)
|
||||
{
|
||||
app *a = to_app (vVars.get (i));
|
||||
if (a->get_kind () != AST_APP)
|
||||
{
|
||||
SET_ERROR_CODE (Z3_INVALID_ARG);
|
||||
RETURN_Z3(0);
|
||||
}
|
||||
vApps.push_back (a);
|
||||
}
|
||||
|
||||
expr_ref result (mk_c(c)->m ());
|
||||
result = to_expr (body);
|
||||
|
||||
params_ref p;
|
||||
qe_lite qe (mk_c(c)->m (), p);
|
||||
qe (vApps, result);
|
||||
|
||||
// -- copy back variables that were not eliminated
|
||||
if (vApps.size () < vVars.size ())
|
||||
{
|
||||
vVars.reset ();
|
||||
for (unsigned i = 0; i < vApps.size (); ++i)
|
||||
vVars.push_back (vApps.get (i));
|
||||
}
|
||||
|
||||
mk_c(c)->save_ast_trail (result.get ());
|
||||
return of_expr (result);
|
||||
Z3_CATCH_RETURN(0);
|
||||
}
|
||||
|
||||
}
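Note: the sketch below is not part of this change; it only illustrates, in standard z3py, the contract behind Z3_qe_model_project. Given phi(x, y, z) and a model M of phi, projecting out y yields a quantifier-free psi such that M satisfies psi and psi implies exists y. phi; the concrete projection x < z is picked by hand for the example.

# Conceptual sketch only; it does not call the new C API directly.
from z3 import Reals, And, Exists, Not, Solver, is_true, sat, unsat

x, y, z = Reals('x y z')
phi = And(x < y, y < z)        # project y out of phi

s = Solver()
s.add(phi)
assert s.check() == sat
m = s.model()                  # the model M that guides the projection

psi = x < z                    # one admissible projection of phi under M

# psi must hold in M ...
assert is_true(m.eval(psi, model_completion=True))
# ... and must under-approximate the existential: psi implies exists y. phi
s2 = Solver()
s2.add(psi, Not(Exists(y, phi)))
assert s2.check() == unsat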
|
|
@ -6536,6 +6536,22 @@ class Fixedpoint(Z3PPObject):
|
|||
r = Z3_fixedpoint_query(self.ctx.ref(), self.fixedpoint, query.as_ast())
|
||||
return CheckSatResult(r)
|
||||
|
||||
def query_from_lvl (self, lvl, *query):
|
||||
"""Query the fixedpoint engine whether formula is derivable starting at the given query level.
|
||||
"""
|
||||
query = _get_args(query)
|
||||
sz = len(query)
|
||||
if sz >= 1 and isinstance(query[0], FuncDecl):
|
||||
_z3_assert (False, "unsupported")
|
||||
else:
|
||||
if sz == 1:
|
||||
query = query[0]
|
||||
else:
|
||||
query = And(query)
|
||||
query = self.abstract(query, False)
|
||||
r = Z3_fixedpoint_query_from_lvl (self.ctx.ref(), self.fixedpoint, query.as_ast(), lvl)
|
||||
return CheckSatResult(r)
|
||||
|
||||
def push(self):
|
||||
"""create a backtracking point for added rules, facts and assertions"""
|
||||
Z3_fixedpoint_push(self.ctx.ref(), self.fixedpoint)
|
||||
|
@ -6558,6 +6574,23 @@ class Fixedpoint(Z3PPObject):
|
|||
r = Z3_fixedpoint_get_answer(self.ctx.ref(), self.fixedpoint)
|
||||
return _to_expr_ref(r, self.ctx)
|
||||
|
||||
def get_ground_sat_answer(self):
|
||||
"""Retrieve a ground cex from last query call."""
|
||||
r = Z3_fixedpoint_get_ground_sat_answer(self.ctx.ref(), self.fixedpoint)
|
||||
return _to_expr_ref(r, self.ctx)
|
||||
|
||||
def get_rules_along_trace(self):
|
||||
"""retrieve rules along the counterexample trace"""
|
||||
return AstVector(Z3_fixedpoint_get_rules_along_trace(self.ctx.ref(), self.fixedpoint), self.ctx)
|
||||
|
||||
def get_rule_names_along_trace(self):
|
||||
"""retrieve rule names along the counterexample trace"""
|
||||
# this is a hack as I don't know how to return a list of symbols from C++;
|
||||
# obtain names as a single string separated by semicolons
|
||||
names = _symbol2py (self.ctx, Z3_fixedpoint_get_rule_names_along_trace(self.ctx.ref(), self.fixedpoint))
|
||||
# split into individual names
|
||||
return names.split (';')
|
||||
|
||||
def get_num_levels(self, predicate):
|
||||
"""Retrieve number of levels used for predicate in PDR engine"""
|
||||
return Z3_fixedpoint_get_num_levels(self.ctx.ref(), self.fixedpoint, predicate.ast)
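Note: a hedged usage sketch for the new Fixedpoint methods added above. It assumes the pre-existing z3py Fixedpoint API (register_relation, rule, fact, query) and only exercises the accessors introduced in this change.

# Toy derivability problem: c holds, c -> b, b -> a; ask whether a is derivable.
from z3 import Fixedpoint, Bools, sat

a, b, c = Bools('a b c')

fp = Fixedpoint()
fp.set(engine='spacer')                 # use the Spacer engine
for r in (a, b, c):
    fp.register_relation(r.decl())

fp.rule(a, b)                           # b -> a
fp.rule(b, c)                           # c -> b
fp.fact(c)                              # c holds

if fp.query(a) == sat:                  # is 'a' derivable?
    # Spacer-specific accessors added in this change
    print(fp.get_ground_sat_answer())          # ground derivation of the query
    print(fp.get_rule_names_along_trace())     # rule names along the cex trace
    print(fp.get_rules_along_trace())          # the rules themselves

# query starting from a given level (also new in this change)
print(fp.query_from_lvl(0, a))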
|
||||
|
|
|
@ -33,5 +33,6 @@ Notes:
|
|||
#include "z3_interp.h"
|
||||
#include "z3_fpa.h"
|
||||
|
||||
#include"z3_spacer.h"
|
||||
#endif
|
||||
|
||||
|
|
143
src/api/z3_spacer.h
Normal file
|
@ -0,0 +1,143 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
z3_spacer.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Spacer API
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel (arie)
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#ifndef Z3_SPACER_H_
|
||||
#define Z3_SPACER_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif // __cplusplus
|
||||
|
||||
/** \defgroup capi C API */
|
||||
/*@{*/
|
||||
|
||||
/** @name Spacer facilities */
|
||||
/*@{*/
|
||||
/**
|
||||
\brief Pose a query against the asserted rules at the given level.
|
||||
|
||||
\code
|
||||
query ::= (exists (bound-vars) query)
|
||||
| literals
|
||||
\endcode
|
||||
|
||||
query returns
|
||||
- Z3_L_FALSE if the query is unsatisfiable.
|
||||
- Z3_L_TRUE if the query is satisfiable. Obtain the answer by calling #Z3_fixedpoint_get_answer.
|
||||
- Z3_L_UNDEF if the query was interrupted, timed out or otherwise failed.
|
||||
|
||||
def_API('Z3_fixedpoint_query_from_lvl', INT, (_in(CONTEXT), _in(FIXEDPOINT), _in(AST), _in(UINT)))
|
||||
*/
|
||||
Z3_lbool Z3_API Z3_fixedpoint_query_from_lvl (Z3_context c,Z3_fixedpoint d, Z3_ast query, unsigned lvl);
|
||||
|
||||
/**
|
||||
\brief Retrieve a bottom-up (from query) sequence of ground facts
|
||||
|
||||
The previous call to Z3_fixedpoint_query must have returned Z3_L_TRUE.
|
||||
|
||||
def_API('Z3_fixedpoint_get_ground_sat_answer', AST, (_in(CONTEXT), _in(FIXEDPOINT)))
|
||||
*/
|
||||
Z3_ast Z3_API Z3_fixedpoint_get_ground_sat_answer(Z3_context c,Z3_fixedpoint d);
|
||||
|
||||
/**
|
||||
\brief Obtain the list of rules along the counterexample trace.
|
||||
|
||||
def_API('Z3_fixedpoint_get_rules_along_trace', AST_VECTOR, (_in(CONTEXT), _in(FIXEDPOINT)))
|
||||
*/
|
||||
Z3_ast_vector Z3_API Z3_fixedpoint_get_rules_along_trace(Z3_context c,Z3_fixedpoint d);
|
||||
|
||||
/**
|
||||
\brief Obtain the list of rule names along the counterexample trace.
|
||||
|
||||
def_API('Z3_fixedpoint_get_rule_names_along_trace', SYMBOL, (_in(CONTEXT), _in(FIXEDPOINT)))
|
||||
*/
|
||||
Z3_symbol Z3_API Z3_fixedpoint_get_rule_names_along_trace(Z3_context c,Z3_fixedpoint d);
|
||||
|
||||
/**
|
||||
\brief Add an invariant for the predicate \c pred.
|
||||
Add an assumed invariant of predicate \c pred.
|
||||
|
||||
Note: this functionality is Spacer specific.
|
||||
|
||||
def_API('Z3_fixedpoint_add_invariant', VOID, (_in(CONTEXT), _in(FIXEDPOINT), _in(FUNC_DECL), _in(AST)))
|
||||
*/
|
||||
void Z3_API Z3_fixedpoint_add_invariant(Z3_context c, Z3_fixedpoint d, Z3_func_decl pred, Z3_ast property);
|
||||
|
||||
|
||||
/**
|
||||
Retrieve reachable states of a predicate.
|
||||
Note: this functionality is Spacer specific.
|
||||
|
||||
def_API('Z3_fixedpoint_get_reachable', AST, (_in(CONTEXT), _in(FIXEDPOINT), _in(FUNC_DECL)))
|
||||
*/
|
||||
Z3_ast Z3_API Z3_fixedpoint_get_reachable(Z3_context c, Z3_fixedpoint d, Z3_func_decl pred);
|
||||
|
||||
/**
|
||||
\brief Project variables given a model
|
||||
|
||||
def_API('Z3_qe_model_project', AST, (_in(CONTEXT), _in(MODEL), _in(UINT), _in_array(2, APP), _in(AST)))
|
||||
*/
|
||||
Z3_ast Z3_API Z3_qe_model_project
|
||||
(Z3_context c,
|
||||
Z3_model m,
|
||||
unsigned num_bounds,
|
||||
Z3_app const bound[],
|
||||
Z3_ast body);
|
||||
|
||||
|
||||
/**
|
||||
\brief Project variables given a model
|
||||
|
||||
def_API('Z3_qe_model_project_skolem', AST, (_in(CONTEXT), _in(MODEL), _in(UINT), _in_array(2, APP), _in(AST), _in(AST_MAP)))
|
||||
*/
|
||||
Z3_ast Z3_API Z3_qe_model_project_skolem
|
||||
(Z3_context c,
|
||||
Z3_model m,
|
||||
unsigned num_bounds,
|
||||
Z3_app const bound[],
|
||||
Z3_ast body,
|
||||
Z3_ast_map map);
|
||||
|
||||
/**
|
||||
\brief Extrapolates a model of a formula
|
||||
|
||||
def_API('Z3_model_extrapolate', AST, (_in(CONTEXT), _in(MODEL), _in(AST)))
|
||||
*/
|
||||
Z3_ast Z3_API Z3_model_extrapolate
|
||||
(Z3_context c,
|
||||
Z3_model m,
|
||||
Z3_ast fml);
|
||||
|
||||
/**
|
||||
\brief Best-effort quantifier elimination
|
||||
|
||||
def_API ('Z3_qe_lite', AST, (_in(CONTEXT), _in(AST_VECTOR), _in(AST)))
|
||||
*/
|
||||
Z3_ast Z3_qe_lite
|
||||
(Z3_context c,
|
||||
Z3_ast_vector vars,
|
||||
Z3_ast body);
|
||||
|
||||
/*@}*/
|
||||
/*@}*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif
|
|
@ -229,6 +229,7 @@ namespace datalog {
|
|||
m_enable_bind_variables(true),
|
||||
m_last_status(OK),
|
||||
m_last_answer(m),
|
||||
m_last_ground_answer(m),
|
||||
m_engine_type(LAST_ENGINE) {
|
||||
re.set_context(this);
|
||||
updt_params(pa);
|
||||
|
@ -306,6 +307,8 @@ namespace datalog {
|
|||
bool context::compress_unbound() const { return m_params->xform_compress_unbound(); }
|
||||
bool context::quantify_arrays() const { return m_params->xform_quantify_arrays(); }
|
||||
bool context::instantiate_quantifiers() const { return m_params->xform_instantiate_quantifiers(); }
|
||||
bool context::array_blast() const { return m_params->xform_array_blast(); }
|
||||
bool context::array_blast_full() const { return m_params->xform_array_blast_full(); }
|
||||
|
||||
|
||||
void context::register_finite_sort(sort * s, sort_kind k) {
|
||||
|
@ -546,10 +549,20 @@ namespace datalog {
|
|||
return m_engine->get_cover_delta(level, pred);
|
||||
}
|
||||
|
||||
expr_ref context::get_reachable(func_decl *pred) {
|
||||
ensure_engine();
|
||||
return m_engine->get_reachable(pred);
|
||||
}
|
||||
void context::add_cover(int level, func_decl* pred, expr* property) {
|
||||
ensure_engine();
|
||||
m_engine->add_cover(level, pred, property);
|
||||
}
|
||||
|
||||
void context::add_invariant(func_decl* pred, expr *property)
|
||||
{
|
||||
ensure_engine();
|
||||
m_engine->add_invariant(pred, property);
|
||||
}
|
||||
|
||||
void context::check_rules(rule_set& r) {
|
||||
m_rule_properties.set_generate_proof(generate_proof_trace());
|
||||
|
@ -561,6 +574,7 @@ namespace datalog {
|
|||
m_rule_properties.check_nested_free();
|
||||
m_rule_properties.check_infinite_sorts();
|
||||
break;
|
||||
case SPACER_ENGINE:
|
||||
case PDR_ENGINE:
|
||||
m_rule_properties.collect(r);
|
||||
m_rule_properties.check_existential_tail();
|
||||
|
@ -792,6 +806,9 @@ namespace datalog {
|
|||
if (e == symbol("datalog")) {
|
||||
m_engine_type = DATALOG_ENGINE;
|
||||
}
|
||||
else if (e == symbol("spacer")) {
|
||||
m_engine_type = SPACER_ENGINE;
|
||||
}
|
||||
else if (e == symbol("pdr")) {
|
||||
m_engine_type = PDR_ENGINE;
|
||||
}
|
||||
|
@ -844,8 +861,10 @@ namespace datalog {
|
|||
m_mc = mk_skip_model_converter();
|
||||
m_last_status = OK;
|
||||
m_last_answer = 0;
|
||||
m_last_ground_answer = 0;
|
||||
switch (get_engine()) {
|
||||
case DATALOG_ENGINE:
|
||||
case SPACER_ENGINE:
|
||||
case PDR_ENGINE:
|
||||
case QPDR_ENGINE:
|
||||
case BMC_ENGINE:
|
||||
|
@ -867,6 +886,28 @@ namespace datalog {
|
|||
return m_engine->query(query);
|
||||
}
|
||||
|
||||
lbool context::query_from_lvl (expr* query, unsigned lvl) {
|
||||
m_mc = mk_skip_model_converter();
|
||||
m_last_status = OK;
|
||||
m_last_answer = 0;
|
||||
m_last_ground_answer = 0;
|
||||
switch (get_engine()) {
|
||||
case DATALOG_ENGINE:
|
||||
case SPACER_ENGINE:
|
||||
case PDR_ENGINE:
|
||||
case QPDR_ENGINE:
|
||||
case BMC_ENGINE:
|
||||
case QBMC_ENGINE:
|
||||
case TAB_ENGINE:
|
||||
case CLP_ENGINE:
|
||||
flush_add_rules();
|
||||
break;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
ensure_engine();
|
||||
return m_engine->query_from_lvl (query, lvl);
|
||||
}
|
||||
model_ref context::get_model() {
|
||||
ensure_engine();
|
||||
return m_engine->get_model();
|
||||
|
@ -905,6 +946,42 @@ namespace datalog {
|
|||
return m_last_answer.get();
|
||||
}
|
||||
|
||||
expr* context::get_ground_sat_answer () {
|
||||
if (m_last_ground_answer) {
|
||||
return m_last_ground_answer;
|
||||
}
|
||||
ensure_engine ();
|
||||
m_last_ground_answer = m_engine->get_ground_sat_answer ();
|
||||
return m_last_ground_answer;
|
||||
}
|
||||
|
||||
void context::get_rules_along_trace (rule_ref_vector& rules) {
|
||||
ensure_engine ();
|
||||
m_engine->get_rules_along_trace (rules);
|
||||
}
|
||||
|
||||
void context::get_rules_along_trace_as_formulas (expr_ref_vector& rules, svector<symbol>& names) {
|
||||
rule_manager& rm = get_rule_manager ();
|
||||
rule_ref_vector rv (rm);
|
||||
get_rules_along_trace (rv);
|
||||
expr_ref fml (m);
|
||||
rule_ref_vector::iterator it = rv.begin (), end = rv.end ();
|
||||
for (; it != end; it++) {
|
||||
m_rule_manager.to_formula (**it, fml);
|
||||
rules.push_back (fml);
|
||||
// The concatenated names are already stored last-first, so do not need to be reversed here
|
||||
const symbol& rule_name = (*it)->name();
|
||||
names.push_back (rule_name);
|
||||
|
||||
TRACE ("dl",
|
||||
if (rule_name == symbol::null) {
|
||||
tout << "Encountered unnamed rule: ";
|
||||
(*it)->display(*this, tout);
|
||||
tout << "\n";
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
void context::display_certificate(std::ostream& out) {
|
||||
ensure_engine();
|
||||
m_engine->display_certificate(out);
|
||||
|
|
|
@ -207,6 +207,7 @@ namespace datalog {
|
|||
bool m_enable_bind_variables;
|
||||
execution_result m_last_status;
|
||||
expr_ref m_last_answer;
|
||||
expr_ref m_last_ground_answer;
|
||||
DL_ENGINE m_engine_type;
|
||||
|
||||
|
||||
|
@ -277,6 +278,8 @@ namespace datalog {
|
|||
bool xform_bit_blast() const;
|
||||
bool xform_slice() const;
|
||||
bool xform_coi() const;
|
||||
bool array_blast() const;
|
||||
bool array_blast_full() const;
|
||||
|
||||
void register_finite_sort(sort * s, sort_kind k);
|
||||
|
||||
|
@ -407,6 +410,10 @@ namespace datalog {
|
|||
*/
|
||||
unsigned get_num_levels(func_decl* pred);
|
||||
|
||||
/**
|
||||
Retrieve reachable facts of 'pred'.
|
||||
*/
|
||||
expr_ref get_reachable(func_decl *pred);
|
||||
/**
|
||||
Retrieve the current cover of 'pred' up to 'level' unfoldings.
|
||||
Return just the delta that is known at 'level'. To
|
||||
|
@ -421,6 +428,11 @@ namespace datalog {
|
|||
*/
|
||||
void add_cover(int level, func_decl* pred, expr* property);
|
||||
|
||||
/**
|
||||
Add an invariant of predicate 'pred'.
|
||||
*/
|
||||
void add_invariant (func_decl *pred, expr *property);
|
||||
|
||||
/**
|
||||
\brief Check rule subsumption.
|
||||
*/
|
||||
|
@ -509,6 +521,7 @@ namespace datalog {
|
|||
|
||||
lbool query(expr* q);
|
||||
|
||||
lbool query_from_lvl (expr* q, unsigned lvl);
|
||||
/**
|
||||
\brief retrieve model from inductive invariant that shows query is unsat.
|
||||
|
||||
|
@ -545,6 +558,18 @@ namespace datalog {
|
|||
in the query that are derivable.
|
||||
*/
|
||||
expr* get_answer_as_formula();
|
||||
/**
|
||||
* get bottom-up (from query) sequence of ground predicate instances
|
||||
* (for e.g. P(0,1,0,0,3)) that together form a ground derivation to query
|
||||
*/
|
||||
expr* get_ground_sat_answer ();
|
||||
|
||||
/**
|
||||
* \brief obtain the sequence of rules along the counterexample trace
|
||||
*/
|
||||
void get_rules_along_trace (rule_ref_vector& rules);
|
||||
|
||||
void get_rules_along_trace_as_formulas (expr_ref_vector& rules, svector<symbol>& names);
|
||||
|
||||
|
||||
void collect_statistics(statistics& st) const;
|
||||
|
|
|
@ -20,11 +20,13 @@ Revision History:
|
|||
#define DL_ENGINE_BASE_H_
|
||||
|
||||
#include "model/model.h"
|
||||
#include "muz/base/dl_util.h"
|
||||
|
||||
namespace datalog {
|
||||
enum DL_ENGINE {
|
||||
DATALOG_ENGINE,
|
||||
PDR_ENGINE,
|
||||
SPACER_ENGINE,
|
||||
QPDR_ENGINE,
|
||||
BMC_ENGINE,
|
||||
QBMC_ENGINE,
|
||||
|
@ -43,6 +45,9 @@ namespace datalog {
|
|||
virtual ~engine_base() {}
|
||||
|
||||
virtual expr_ref get_answer() = 0;
|
||||
virtual expr_ref get_ground_sat_answer () {
|
||||
throw default_exception(std::string("operation is not supported for ") + m_name);
|
||||
}
|
||||
virtual lbool query(expr* q) = 0;
|
||||
virtual lbool query(unsigned num_rels, func_decl*const* rels) {
|
||||
if (num_rels != 1) return l_undef;
|
||||
|
@ -64,6 +69,9 @@ namespace datalog {
|
|||
}
|
||||
return query(q);
|
||||
}
|
||||
virtual lbool query_from_lvl (expr* q, unsigned lvl) {
|
||||
throw default_exception(std::string("operation is not supported for ") + m_name);
|
||||
}
|
||||
|
||||
virtual void reset_statistics() {}
|
||||
virtual void display_profile(std::ostream& out) {}
|
||||
|
@ -71,18 +79,27 @@ namespace datalog {
|
|||
virtual unsigned get_num_levels(func_decl* pred) {
|
||||
throw default_exception(std::string("get_num_levels is not supported for ") + m_name);
|
||||
}
|
||||
virtual expr_ref get_reachable(func_decl* pred) {
|
||||
throw default_exception(std::string("operation is not supported for ") + m_name);
|
||||
}
|
||||
virtual expr_ref get_cover_delta(int level, func_decl* pred) {
|
||||
throw default_exception(std::string("operation is not supported for ") + m_name);
|
||||
}
|
||||
virtual void add_cover(int level, func_decl* pred, expr* property) {
|
||||
throw default_exception(std::string("operation is not supported for ") + m_name);
|
||||
}
|
||||
virtual void add_invariant (func_decl *pred, expr *property) {
|
||||
throw default_exception(std::string("operation is not supported for ") + m_name);
|
||||
}
|
||||
virtual void display_certificate(std::ostream& out) const {
|
||||
throw default_exception(std::string("certificates are not supported for ") + m_name);
|
||||
}
|
||||
virtual model_ref get_model() {
|
||||
return model_ref(alloc(model, m));
|
||||
}
|
||||
virtual void get_rules_along_trace (rule_ref_vector& rules) {
|
||||
throw default_exception(std::string("get_rules_along_trace is not supported for ") + m_name);
|
||||
}
|
||||
virtual proof_ref get_proof() {
|
||||
return proof_ref(m.mk_asserted(m.mk_true()), m);
|
||||
}
|
||||
|
|
|
@ -54,7 +54,7 @@ namespace datalog {
|
|||
m_head(m),
|
||||
m_args(m),
|
||||
m_hnf(m),
|
||||
m_qe(m, params_ref()),
|
||||
m_qe(m, params_ref(), false),
|
||||
m_rwr(m),
|
||||
m_ufproc(m) {}
|
||||
|
||||
|
@ -639,7 +639,7 @@ namespace datalog {
|
|||
tail.push_back(ensure_app(conjs[i].get()));
|
||||
}
|
||||
tail_neg.resize(tail.size(), false);
|
||||
r = mk(r->get_head(), tail.size(), tail.c_ptr(), tail_neg.c_ptr());
|
||||
r = mk(r->get_head(), tail.size(), tail.c_ptr(), tail_neg.c_ptr(), r->name());
|
||||
TRACE("dl", r->display(m_ctx, tout << "reduced rule\n"););
|
||||
}
|
||||
}
|
||||
|
|
|
@ -52,6 +52,7 @@ namespace datalog {
|
|||
~verbose_action();
|
||||
};
|
||||
|
||||
typedef ref_vector<rule, rule_manager> rule_ref_vector;
|
||||
enum PDR_CACHE_MODE {
|
||||
NO_CACHE,
|
||||
HASH_CACHE,
|
||||
|
|
|
@ -3,7 +3,7 @@ def_module_params('fixedpoint',
|
|||
export=True,
|
||||
params=(('timeout', UINT, UINT_MAX, 'set timeout'),
|
||||
('engine', SYMBOL, 'auto-config',
|
||||
'Select: auto-config, datalog, duality, pdr, bmc'),
|
||||
'Select: auto-config, datalog, duality, pdr, bmc, spacer'),
|
||||
('datalog.default_table', SYMBOL, 'sparse',
|
||||
'default table implementation: sparse, hashtable, bitvector, interval'),
|
||||
('datalog.default_relation', SYMBOL, 'pentagon',
|
||||
|
@ -54,6 +54,8 @@ def_module_params('fixedpoint',
|
|||
"if true, finite_product_relation will attempt to avoid creating " +
|
||||
"inner relation with empty signature by putting in half of the " +
|
||||
"table columns, if it would have been empty otherwise"),
|
||||
('datalog.subsumption', BOOL, True,
|
||||
"if true, removes/filters predicates with total transitions"),
|
||||
('duality.full_expand', BOOL, False, 'Fully expand derivation trees'),
|
||||
('duality.no_conj', BOOL, False, 'No forced covering (conjectures)'),
|
||||
('duality.feasible_edges', BOOL, True,
|
||||
|
@ -74,6 +76,8 @@ def_module_params('fixedpoint',
|
|||
('pdr.flexible_trace', BOOL, False,
|
||||
"allow PDR generate long counter-examples " +
|
||||
"by extending candidate trace within search area"),
|
||||
('pdr.flexible_trace_depth', UINT, UINT_MAX,
|
||||
'Controls the depth (below the current level) at which flexible trace can be applied'),
|
||||
('pdr.use_model_generalizer', BOOL, False,
|
||||
"use model for backwards propagation (instead of symbolic simulation)"),
|
||||
('pdr.validate_result', BOOL, False,
|
||||
|
@ -138,13 +142,65 @@ def_module_params('fixedpoint',
|
|||
('xform.slice', BOOL, True, "simplify clause set using slicing"),
|
||||
('xform.karr', BOOL, False,
|
||||
"Add linear invariants to clauses using Karr's method"),
|
||||
('spacer.use_eqclass', BOOL, False, "Generalizes equalities to equivalence classes"),
|
||||
('xform.transform_arrays', BOOL, False,
|
||||
"Rewrites arrays equalities and applies select over store"),
|
||||
('xform.instantiate_arrays', BOOL, False,
|
||||
"Transforms P(a) into P(i, a[i] a)"),
|
||||
('xform.instantiate_arrays.enforce', BOOL, False,
|
||||
"Transforms P(a) into P(i, a[i]), discards a from predicate"),
|
||||
('xform.instantiate_arrays.nb_quantifier', UINT, 1,
|
||||
"Gives the number of quantifiers per array"),
|
||||
('xform.instantiate_arrays.slice_technique', SYMBOL, "no-slicing",
|
||||
"<no-slicing>=> GetId(i) = i, <smash> => GetId(i) = true"),
|
||||
('xform.quantify_arrays', BOOL, False,
|
||||
"create quantified Horn clauses from clauses with arrays"),
|
||||
('xform.instantiate_quantifiers', BOOL, False,
|
||||
"instantiate quantified Horn clauses using E-matching heuristic"),
|
||||
('xform.coalesce_rules', BOOL, False, "coalesce rules"),
|
||||
('xform.tail_simplifier_pve', BOOL, True, "propagate_variable_equivalences"),
|
||||
('xform.subsumption_checker', BOOL, True, "Enable subsumption checker (no support for model conversion)"),
|
||||
('xform.coi', BOOL, True, "use cone of influence simplification"),
|
||||
('duality.enable_restarts', BOOL, False, 'DUALITY: enable restarts'),
|
||||
('spacer.order_children', UINT, 0, 'SPACER: order of enqueuing children in non-linear rules : 0 (original), 1 (reverse)'),
|
||||
('spacer.eager_reach_check', BOOL, True, 'SPACER: eagerly check if a query is reachable using reachability facts of predecessors'),
|
||||
('spacer.use_lemma_as_cti', BOOL, False, 'SPACER: use a lemma instead of a CTI in flexible_trace'),
|
||||
('spacer.reset_obligation_queue', BOOL, True, 'SPACER: reset obligation queue when entering a new level'),
|
||||
('spacer.init_reach_facts', BOOL, True, 'SPACER: initialize reachability facts with false'),
|
||||
('spacer.use_array_eq_generalizer', BOOL, True, 'SPACER: attempt to generalize lemmas with array equalities'),
|
||||
('spacer.use_derivations', BOOL, True, 'SPACER: using derivation mechanism to cache intermediate results for non-linear rules'),
|
||||
('xform.array_blast', BOOL, False, "try to eliminate local array terms using Ackermannization -- some array terms may remain"),
|
||||
('xform.array_blast_full', BOOL, False, "eliminate all local array variables by QE"),
|
||||
('spacer.skip_propagate', BOOL, False, "Skip propagate/pushing phase. Turns PDR into a BMC that returns either reachable or unknown"),
|
||||
('spacer.max_level', UINT, UINT_MAX, "Maximum level to explore"),
|
||||
('spacer.elim_aux', BOOL, True, "Eliminate auxiliary variables in reachability facts"),
|
||||
('spacer.reach_as_init', BOOL, True, "Extend initial rules with computed reachability facts"),
|
||||
('spacer.blast_term_ite', BOOL, True, "Expand non-Boolean ite-terms"),
|
||||
('spacer.nondet_tie_break', BOOL, False, "Break ties in obligation queue non-deterministically"),
|
||||
('spacer.reach_dnf', BOOL, True, "Restrict reachability facts to DNF"),
|
||||
('bmc.linear_unrolling_depth', UINT, UINT_MAX, "Maximal level to explore"),
|
||||
('spacer.split_farkas_literals', BOOL, False, "Split Farkas literals"),
|
||||
('spacer.native_mbp', BOOL, False, "Use native mbp of Z3"),
|
||||
('spacer.eq_prop', BOOL, True, "Enable equality and bound propagation in arithmetic"),
|
||||
('spacer.weak_abs', BOOL, True, "Weak abstraction"),
|
||||
('spacer.restarts', BOOL, False, "Enable resetting obligation queue"),
|
||||
('spacer.restart_initial_threshold', UINT, 10, "Initial threshold for restarts"),
|
||||
('spacer.random_seed', UINT, 0, "Random seed to be used by SMT solver"),
|
||||
('spacer.ground_cti', BOOL, True, "Require CTI to be ground"),
|
||||
('spacer.vs.dump_benchmarks', BOOL, False, 'dump benchmarks in virtual solver'),
|
||||
('spacer.vs.dump_min_time', DOUBLE, 5.0, 'min time to dump benchmark'),
|
||||
('spacer.vs.recheck', BOOL, False, 're-check locally during benchmark dumping'),
|
||||
('spacer.mbqi', BOOL, True, 'use model-based quantifier instantiation'),
|
||||
('spacer.keep_proxy', BOOL, True, 'keep proxy variables (internal parameter)'),
|
||||
('spacer.instantiate', BOOL, True, 'instantiate quantified lemmas'),
|
||||
('spacer.qlemmas', BOOL, True, 'allow quantified lemmas in frames'),
|
||||
('spacer.new_unsat_core', BOOL, True, 'use the new implementation of unsat-core-generation'),
|
||||
('spacer.minimize_unsat_core', BOOL, False, 'compute unsat-core by min-cut'),
|
||||
('spacer.farkas_optimized', BOOL, True, 'use the optimized farkas plugin, which performs gaussian elimination'),
|
||||
('spacer.farkas_a_const', BOOL, True, 'if the unoptimized farkas plugin is used, use the constants from A while constructing unsat_cores'),
|
||||
('spacer.lemma_sanity_check', BOOL, False, 'check during generalization whether lemma is actually correct'),
|
||||
('spacer.reuse_pobs', BOOL, True, 'reuse POBs'),
|
||||
('spacer.simplify_pob', BOOL, False, 'simplify POBs by removing redundant constraints')
|
||||
))
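Note: the new spacer.* options declared above are reached through the usual fixedpoint parameter mechanism; the snippet below is an assumed z3py usage sketch, not part of this change.

# Hedged sketch: selecting Spacer and tuning the new parameters from z3py.
from z3 import Fixedpoint

fp = Fixedpoint()
fp.set(engine='spacer')                    # select the Spacer engine
fp.set('spacer.max_level', 20)             # bound the exploration depth
fp.set('spacer.reset_obligation_queue', True)
fp.set('spacer.random_seed', 42)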
|
||||
|
||||
|
||||
|
|
|
@ -12,12 +12,19 @@ Copyright (c) 2015 Microsoft Corporation
|
|||
class reduce_hypotheses {
|
||||
typedef obj_hashtable<expr> expr_set;
|
||||
ast_manager& m;
|
||||
// reference for any expression created by the transformation
|
||||
expr_ref_vector m_refs;
|
||||
// currently computed result
|
||||
obj_map<proof,proof*> m_cache;
|
||||
// map conclusions to closed proofs that derive them
|
||||
obj_map<expr, proof*> m_units;
|
||||
// currently active units
|
||||
ptr_vector<expr> m_units_trail;
|
||||
// size of m_units_trail at the last push
|
||||
unsigned_vector m_limits;
|
||||
// map from proofs to active hypotheses
|
||||
obj_map<proof, expr_set*> m_hypmap;
|
||||
// reference trail for hypotheses sets
|
||||
ptr_vector<expr_set> m_hyprefs;
|
||||
ptr_vector<expr> m_literals;
|
||||
|
||||
|
@ -151,19 +158,33 @@ public:
|
|||
p = result;
|
||||
return;
|
||||
}
|
||||
//SASSERT (p.get () == result);
|
||||
switch(p->get_decl_kind()) {
|
||||
case PR_HYPOTHESIS:
|
||||
// replace result by m_units[m.get_fact (p)] if defined
|
||||
// AG: This is the main step. Replace a hypothesis by a derivation of its consequence
|
||||
if (!m_units.find(m.get_fact(p), result)) {
|
||||
// restore the result back to p
|
||||
result = p.get();
|
||||
}
|
||||
// compute hypothesis of the result
|
||||
// not clear what 'result' is at this point.
|
||||
// probably the proof at the top of the call
|
||||
// XXX not clear why this is re-computed each time
|
||||
// XXX moreover, m_units are guaranteed to be closed!
|
||||
// XXX so no hypotheses are needed for them
|
||||
add_hypotheses(result);
|
||||
break;
|
||||
case PR_LEMMA: {
|
||||
SASSERT(m.get_num_parents(p) == 1);
|
||||
tmp = m.get_parent(p, 0);
|
||||
// eliminate hypothesis recursively in the proof of the lemma
|
||||
elim(tmp);
|
||||
expr_set* hyps = m_hypmap.find(tmp);
|
||||
expr_set* new_hyps = 0;
|
||||
// XXX if the proof is correct, the hypotheses of the tmp
|
||||
// XXX should be exactly those of the consequence of the lemma
|
||||
// XXX but if this code actually eliminates hypotheses, the set might be a subset
|
||||
if (hyps) {
|
||||
new_hyps = alloc(expr_set, *hyps);
|
||||
}
|
||||
|
@ -178,13 +199,19 @@ public:
|
|||
get_literals(fact);
|
||||
}
|
||||
|
||||
// go over all the literals in the consequence of the lemma
|
||||
for (unsigned i = 0; i < m_literals.size(); ++i) {
|
||||
expr* e = m_literals[i];
|
||||
// if the literal is not in hypothesis, skip it
|
||||
if (!in_hypotheses(e, hyps)) {
|
||||
m_literals[i] = m_literals.back();
|
||||
m_literals.pop_back();
|
||||
--i;
|
||||
}
|
||||
// if the literal is in hypothesis remove it because
|
||||
// it is not in hypothesis set of the lemma
|
||||
// XXX but we assume that lemmas have empty hypothesis set.
|
||||
// XXX eventually every element of new_hyps must be removed!
|
||||
else {
|
||||
SASSERT(new_hyps);
|
||||
expr_ref not_e = complement_lit(e);
|
||||
|
@ -192,10 +219,13 @@ public:
|
|||
new_hyps->remove(not_e);
|
||||
}
|
||||
}
|
||||
// killed all hypotheses, so can stop at the lemma since
|
||||
// we have a closed pf of false
|
||||
if (m_literals.empty()) {
|
||||
result = tmp;
|
||||
}
|
||||
else {
|
||||
// create a new lemma, but might be re-creating existing one
|
||||
expr_ref clause(m);
|
||||
if (m_literals.size() == 1) {
|
||||
clause = m_literals[0];
|
||||
|
@ -212,6 +242,7 @@ public:
|
|||
new_hyps = 0;
|
||||
}
|
||||
m_hypmap.insert(result, new_hyps);
|
||||
// might push 0 into m_hyprefs. No reason for that
|
||||
m_hyprefs.push_back(new_hyps);
|
||||
TRACE("proof_utils",
|
||||
tout << "New lemma: " << mk_pp(m.get_fact(p), m)
|
||||
|
@ -229,19 +260,27 @@ public:
|
|||
}
|
||||
case PR_UNIT_RESOLUTION: {
|
||||
proof_ref_vector parents(m);
|
||||
// get the clause being resolved with
|
||||
parents.push_back(m.get_parent(p, 0));
|
||||
// save state
|
||||
push();
|
||||
bool found_false = false;
|
||||
// for every derivation of a unit literal
|
||||
for (unsigned i = 1; i < m.get_num_parents(p); ++i) {
|
||||
// see if it derives false
|
||||
tmp = m.get_parent(p, i);
|
||||
elim(tmp);
|
||||
if (m.is_false(m.get_fact(tmp))) {
|
||||
// if derived false, the whole pf is false and we can bail out
|
||||
result = tmp;
|
||||
found_false = true;
|
||||
break;
|
||||
}
|
||||
// -- otherwise, the fact has not changed. nothing to simplify
|
||||
SASSERT(m.get_fact(tmp) == m.get_fact(m.get_parent(p, i)));
|
||||
parents.push_back(tmp);
|
||||
// remember that we have this derivation while we have not popped the trail
|
||||
// but only if the proof is closed (i.e., a real unit)
|
||||
if (is_closed(tmp) && !m_units.contains(m.get_fact(tmp))) {
|
||||
m_units.insert(m.get_fact(tmp), tmp);
|
||||
m_units_trail.push_back(m.get_fact(tmp));
|
||||
|
@ -251,10 +290,15 @@ public:
|
|||
pop();
|
||||
break;
|
||||
}
|
||||
// look at the clause being resolved with
|
||||
tmp = m.get_parent(p, 0);
|
||||
// remember its fact
|
||||
expr* old_clause = m.get_fact(tmp);
|
||||
// attempt to reduce its fact
|
||||
elim(tmp);
|
||||
// update parents
|
||||
parents[0] = tmp;
|
||||
// if the new fact is false, bail out
|
||||
expr* clause = m.get_fact(tmp);
|
||||
if (m.is_false(clause)) {
|
||||
m_refs.push_back(tmp);
|
||||
|
@ -264,8 +308,10 @@ public:
|
|||
}
|
||||
//
|
||||
// case where clause is a literal in the old clause.
|
||||
// i.e., reduce multi-literal clause to a unit
|
||||
//
|
||||
if (is_literal_in_clause(clause, old_clause)) {
|
||||
// if the resulting literal was resolved, get a pf of false and bail out
|
||||
bool found = false;
|
||||
for (unsigned i = 1; !found && i < parents.size(); ++i) {
|
||||
if (m.is_complement(clause, m.get_fact(parents[i].get()))) {
|
||||
|
@ -277,6 +323,7 @@ public:
|
|||
found = true;
|
||||
}
|
||||
}
|
||||
// else if the resulting literal is not resolved, it is the new consequence
|
||||
if (!found) {
|
||||
result = parents[0].get();
|
||||
}
|
||||
|
@ -508,6 +555,11 @@ static void permute_unit_resolution(expr_ref_vector& refs, obj_map<proof,proof*>
|
|||
SASSERT(params[0].is_symbol());
|
||||
family_id tid = m.mk_family_id(params[0].get_symbol());
|
||||
SASSERT(tid != null_family_id);
|
||||
// AG: This can break a theory lemma. In particular, for Farkas lemmas the coefficients
|
||||
// AG: for the literals propagated from the unit resolution are missing.
|
||||
// AG: Why is this a good thing to do?
|
||||
// AG: This can lead to merging of the units with other terms in interpolation,
|
||||
// AG: but without farkas coefficients this does not make sense
|
||||
prNew = m.mk_th_lemma(tid, m.get_fact(pr),
|
||||
premises.size(), premises.c_ptr(), num_params-1, params+1);
|
||||
}
|
||||
|
|
|
@ -33,6 +33,8 @@ Revision History:
|
|||
#include "muz/transforms/dl_mk_rule_inliner.h"
|
||||
#include "ast/scoped_proof.h"
|
||||
|
||||
#include "fixedpoint_params.hpp"
|
||||
|
||||
namespace datalog {
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
|
@ -143,6 +145,7 @@ namespace datalog {
|
|||
b.m_fparams.m_model = true;
|
||||
b.m_fparams.m_model_compact = true;
|
||||
b.m_fparams.m_mbqi = true;
|
||||
b.m_rule_trace.reset();
|
||||
}
|
||||
|
||||
void mk_qrule_vars(datalog::rule const& r, unsigned rule_id, expr_ref_vector& sub) {
|
||||
|
@ -279,6 +282,7 @@ namespace datalog {
|
|||
}
|
||||
SASSERT(r);
|
||||
mk_qrule_vars(*r, i, sub);
|
||||
b.m_rule_trace.push_back(r);
|
||||
// we have rule, we have variable names of rule.
|
||||
|
||||
// extract values for the variables in the rule.
|
||||
|
@ -470,6 +474,7 @@ namespace datalog {
|
|||
b.m_fparams.m_model_compact = true;
|
||||
// b.m_fparams.m_mbqi = true;
|
||||
b.m_fparams.m_relevancy_lvl = 2;
|
||||
b.m_rule_trace.reset();
|
||||
}
|
||||
|
||||
lbool check(unsigned level) {
|
||||
|
@ -507,6 +512,7 @@ namespace datalog {
|
|||
}
|
||||
}
|
||||
SASSERT(r);
|
||||
b.m_rule_trace.push_back(r);
|
||||
rm.to_formula(*r, fml);
|
||||
IF_VERBOSE(1, verbose_stream() << mk_pp(fml, m) << "\n";);
|
||||
prs.push_back(r->get_proof());
|
||||
|
@ -760,6 +766,7 @@ namespace datalog {
|
|||
b.m_fparams.m_model_compact = true;
|
||||
b.m_fparams.m_mbqi = false;
|
||||
b.m_fparams.m_relevancy_lvl = 2;
|
||||
b.m_rule_trace.reset();
|
||||
}
|
||||
|
||||
func_decl_ref mk_predicate(func_decl* pred) {
|
||||
|
@ -1078,6 +1085,7 @@ namespace datalog {
|
|||
}
|
||||
head = rl->get_head();
|
||||
pr = m.mk_hyper_resolve(sz+1, prs.c_ptr(), head, positions, substs);
|
||||
b.m_rule_trace.push_back(rl.get());
|
||||
return pr;
|
||||
}
|
||||
}
|
||||
|
@ -1154,7 +1162,8 @@ namespace datalog {
|
|||
|
||||
lbool check() {
|
||||
setup();
|
||||
for (unsigned i = 0; ; ++i) {
|
||||
unsigned max_depth = b.m_ctx.get_params().bmc_linear_unrolling_depth();
|
||||
for (unsigned i = 0; i < max_depth; ++i) {
|
||||
IF_VERBOSE(1, verbose_stream() << "level: " << i << "\n";);
|
||||
b.checkpoint();
|
||||
compile(i);
|
||||
|
@ -1167,6 +1176,7 @@ namespace datalog {
|
|||
return res;
|
||||
}
|
||||
}
|
||||
return l_undef;
|
||||
}
|
||||
|
||||
private:
|
||||
|
@ -1202,6 +1212,7 @@ namespace datalog {
|
|||
}
|
||||
}
|
||||
SASSERT(r);
|
||||
b.m_rule_trace.push_back(r);
|
||||
mk_rule_vars(*r, level, i, sub);
|
||||
// we have rule, we have variable names of rule.
|
||||
|
||||
|
@ -1284,6 +1295,7 @@ namespace datalog {
|
|||
b.m_fparams.m_model_compact = true;
|
||||
b.m_fparams.m_mbqi = false;
|
||||
// m_fparams.m_auto_config = false;
|
||||
b.m_rule_trace.reset();
|
||||
}
|
||||
|
||||
|
||||
|
@ -1426,7 +1438,8 @@ namespace datalog {
|
|||
m_solver(m, m_fparams),
|
||||
m_rules(ctx),
|
||||
m_query_pred(m),
|
||||
m_answer(m) {
|
||||
m_answer(m),
|
||||
m_rule_trace(ctx.get_rule_manager()) {
|
||||
}
|
||||
|
||||
bmc::~bmc() {}
|
||||
|
@ -1530,6 +1543,10 @@ namespace datalog {
|
|||
return m_answer;
|
||||
}
|
||||
|
||||
void bmc::get_rules_along_trace(datalog::rule_ref_vector& rules) {
|
||||
rules.append(m_rule_trace);
|
||||
}
|
||||
|
||||
void bmc::compile(rule_set const& rules, expr_ref_vector& fmls, unsigned level) {
|
||||
nonlinear nl(*this);
|
||||
nl.compile(rules, fmls, level);
|
||||
|
|
|
@ -38,6 +38,7 @@ namespace datalog {
|
|||
rule_set m_rules;
|
||||
func_decl_ref m_query_pred;
|
||||
expr_ref m_answer;
|
||||
rule_ref_vector m_rule_trace;
|
||||
|
||||
void checkpoint();
|
||||
|
||||
|
@ -63,6 +64,7 @@ namespace datalog {
|
|||
void collect_statistics(statistics& st) const;
|
||||
|
||||
void reset_statistics();
|
||||
void get_rules_along_trace(datalog::rule_ref_vector& rules);
|
||||
|
||||
expr_ref get_answer();
|
||||
|
||||
|
|
|
@ -12,6 +12,7 @@ z3_add_component(fp
|
|||
muz
|
||||
pdr
|
||||
rel
|
||||
spacer
|
||||
tab
|
||||
TACTIC_HEADERS
|
||||
horn_tactic.h
|
||||
|
|
|
@ -24,6 +24,7 @@ Revision History:
|
|||
#include "muz/pdr/pdr_dl_interface.h"
|
||||
#include "muz/ddnf/ddnf.h"
|
||||
#include "muz/duality/duality_dl_interface.h"
|
||||
#include "muz/spacer/spacer_dl_interface.h"
|
||||
|
||||
namespace datalog {
|
||||
register_engine::register_engine(): m_ctx(0) {}
|
||||
|
@ -33,6 +34,8 @@ namespace datalog {
|
|||
case PDR_ENGINE:
|
||||
case QPDR_ENGINE:
|
||||
return alloc(pdr::dl_interface, *m_ctx);
|
||||
case SPACER_ENGINE:
|
||||
return alloc(spacer::dl_interface, *m_ctx);
|
||||
case DATALOG_ENGINE:
|
||||
return alloc(rel_context, *m_ctx);
|
||||
case BMC_ENGINE:
|
||||
|
|
33
src/muz/spacer/CMakeLists.txt
Normal file
|
@ -0,0 +1,33 @@
|
|||
z3_add_component(spacer
|
||||
SOURCES
|
||||
spacer_legacy_mev.cpp
|
||||
spacer_legacy_frames.cpp
|
||||
spacer_context.cpp
|
||||
spacer_dl_interface.cpp
|
||||
spacer_farkas_learner.cpp
|
||||
spacer_generalizers.cpp
|
||||
spacer_manager.cpp
|
||||
spacer_marshal.cpp
|
||||
spacer_prop_solver.cpp
|
||||
spacer_smt_context_manager.cpp
|
||||
spacer_sym_mux.cpp
|
||||
spacer_util.cpp
|
||||
spacer_itp_solver.cpp
|
||||
spacer_virtual_solver.cpp
|
||||
spacer_legacy_mbp.cpp
|
||||
spacer_proof_utils.cpp
|
||||
spacer_unsat_core_learner.cpp
|
||||
spacer_unsat_core_plugin.cpp
|
||||
spacer_matrix.cpp
|
||||
spacer_min_cut.cpp
|
||||
spacer_antiunify.cpp
|
||||
spacer_mev_array.cpp
|
||||
spacer_qe_project.cpp
|
||||
COMPONENT_DEPENDENCIES
|
||||
arith_tactics
|
||||
core_tactics
|
||||
muz
|
||||
qe
|
||||
smt_tactic
|
||||
transforms
|
||||
)
|
250
src/muz/spacer/obj_equiv_class.h
Normal file
|
@ -0,0 +1,250 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
obj_equiv_class.h
|
||||
|
||||
Abstract:
|
||||
"Equivalence class structure" for objs. Uses a union_find structure internally.
|
||||
Operations are :
|
||||
-Declare a new equivalence class with a single element
|
||||
-Merge two equivalence classes
|
||||
-Retrieve whether two elements are in the same equivalence class
|
||||
-Iterate on all the elements of the equivalence class of a given element
|
||||
-Iterate on all equivalence classes (and then within them)
|
||||
|
||||
Author:
|
||||
|
||||
Julien Braine
|
||||
|
||||
Revision History:
|
||||
|
||||
*/
|
||||
|
||||
#ifndef OBJ_EQUIV_CLASS_H_
|
||||
#define OBJ_EQUIV_CLASS_H_
|
||||
|
||||
#include "util/union_find.h"
|
||||
#include "ast/ast_util.h"
|
||||
|
||||
namespace spacer {
|
||||
//All functions naturally add their parameters to the union_find class
|
||||
template<typename OBJ, typename Manager>
|
||||
class obj_equiv_class {
|
||||
basic_union_find m_uf;
|
||||
obj_map<OBJ, unsigned> m_to_int;
|
||||
ref_vector<OBJ, Manager> m_to_obj;
|
||||
|
||||
unsigned add_elem_impl(OBJ*o) {
|
||||
unsigned id = m_to_obj.size();
|
||||
m_to_int.insert(o, id);
|
||||
m_to_obj.push_back(o);
|
||||
return id;
|
||||
}
|
||||
unsigned add_if_not_there(OBJ*o) {
|
||||
unsigned id;
|
||||
if(!m_to_int.find(o, id)) {
|
||||
id = add_elem_impl(o);
|
||||
}
|
||||
return id;
|
||||
}
|
||||
|
||||
public:
|
||||
class iterator;
|
||||
class equiv_iterator;
|
||||
friend class iterator;
|
||||
friend class equiv_iterator;
|
||||
|
||||
obj_equiv_class(Manager& m) : m_to_obj(m) {}
|
||||
|
||||
void add_elem(OBJ*o) {
|
||||
SASSERT(!m_to_int.find(o));
|
||||
add_elem_impl(o);
|
||||
}
|
||||
|
||||
//Invalidates all iterators
|
||||
void merge(OBJ* a, OBJ* b) {
|
||||
unsigned v1 = add_if_not_there(a);
|
||||
unsigned v2 = add_if_not_there(b);
|
||||
unsigned tmp1 = m_uf.find(v1);
|
||||
unsigned tmp2 = m_uf.find(v2);
|
||||
m_uf.merge(tmp1, tmp2);
|
||||
}
|
||||
|
||||
void reset() {
|
||||
m_uf.reset();
|
||||
m_to_int.reset();
|
||||
m_to_obj.reset();
|
||||
}
|
||||
|
||||
bool are_equiv(OBJ*a, OBJ*b) {
|
||||
unsigned id1 = add_if_not_there(a);
|
||||
unsigned id2 = add_if_not_there(b);
|
||||
return m_uf.find(id1) == m_uf.find(id2);
|
||||
}
|
||||
|
||||
class iterator {
|
||||
friend class obj_equiv_class;
|
||||
private :
|
||||
const obj_equiv_class& m_ouf;
|
||||
unsigned m_curr_id;
|
||||
bool m_first;
|
||||
iterator(const obj_equiv_class& uf, unsigned id, bool f) :
|
||||
m_ouf(uf), m_curr_id(id), m_first(f) {}
|
||||
public :
|
||||
OBJ*operator*() {return m_ouf.m_to_obj[m_curr_id];}
|
||||
|
||||
iterator& operator++() {
|
||||
m_curr_id = m_ouf.m_uf.next(m_curr_id);
|
||||
m_first = false;
|
||||
return *this;
|
||||
}
|
||||
bool operator==(const iterator& o) {
|
||||
SASSERT(&m_ouf == &o.m_ouf);
|
||||
return m_first == o.m_first && m_curr_id == o.m_curr_id;
|
||||
}
|
||||
bool operator!=(const iterator& o) {return !(*this == o);}
|
||||
};
|
||||
|
||||
iterator begin(OBJ*o) {
|
||||
unsigned id = add_if_not_there(o);
|
||||
return iterator(*this, id, true);
|
||||
}
|
||||
iterator end(OBJ*o) {
|
||||
unsigned id = add_if_not_there(o);
|
||||
return iterator(*this, id, false);
|
||||
}
|
||||
|
||||
class eq_class {
|
||||
private :
|
||||
iterator m_begin;
|
||||
iterator m_end;
|
||||
public :
|
||||
eq_class(const iterator& a, const iterator& b) : m_begin(a), m_end(b) {}
|
||||
iterator begin() {return m_begin;}
|
||||
iterator end() {return m_end;}
|
||||
};
|
||||
|
||||
class equiv_iterator {
|
||||
friend class obj_equiv_class;
|
||||
private :
|
||||
const obj_equiv_class& m_ouf;
|
||||
unsigned m_rootnb;
|
||||
equiv_iterator(const obj_equiv_class& uf, unsigned nb) :
|
||||
m_ouf(uf), m_rootnb(nb) {
|
||||
while(m_rootnb != m_ouf.m_to_obj.size() &&
|
||||
m_ouf.m_uf.is_root(m_rootnb) != true)
|
||||
{ m_rootnb++; }
|
||||
}
|
||||
public :
|
||||
eq_class operator*() {
|
||||
return eq_class(iterator(m_ouf, m_rootnb, true),
|
||||
iterator(m_ouf, m_rootnb, false));
|
||||
}
|
||||
equiv_iterator& operator++() {
|
||||
do {
|
||||
m_rootnb++;
|
||||
} while(m_rootnb != m_ouf.m_to_obj.size() &&
|
||||
m_ouf.m_uf.is_root(m_rootnb) != true);
|
||||
return *this;
|
||||
}
|
||||
bool operator==(const equiv_iterator& o) {
|
||||
SASSERT(&m_ouf == &o.m_ouf);
|
||||
return m_rootnb == o.m_rootnb;
|
||||
}
|
||||
bool operator!=(const equiv_iterator& o) {return !(*this == o);}
|
||||
};
|
||||
|
||||
equiv_iterator begin() {return equiv_iterator(*this, 0);}
|
||||
equiv_iterator end() {return equiv_iterator(*this, m_to_obj.size());}
|
||||
};
|
||||
|
||||
typedef obj_equiv_class<expr, ast_manager> expr_equiv_class;
|
||||
|
||||
|
||||
/**
|
||||
Factors input vector v into equivalence classes and the rest
|
||||
*/
|
||||
inline void factor_eqs(expr_ref_vector &v, expr_equiv_class &equiv) {
|
||||
ast_manager &m = v.get_manager();
|
||||
arith_util arith(m);
|
||||
expr *e1, *e2;
|
||||
|
||||
flatten_and(v);
|
||||
unsigned j = 0;
|
||||
for (unsigned i = 0; i < v.size(); ++i) {
|
||||
if (m.is_eq(v.get(i), e1, e2)) {
|
||||
if (arith.is_zero(e1)) {
|
||||
expr* t;
|
||||
t = e1; e1 = e2; e2 = t;
|
||||
}
|
||||
|
||||
// y + -1*x == 0
|
||||
if (arith.is_zero(e2) && arith.is_add(e1) &&
|
||||
to_app(e1)->get_num_args() == 2) {
|
||||
expr *a0, *a1, *x;
|
||||
|
||||
a0 = to_app(e1)->get_arg(0);
|
||||
a1 = to_app(e1)->get_arg(1);
|
||||
|
||||
if (arith.is_times_minus_one(a1, x)) {
|
||||
e1 = a0;
|
||||
e2 = x;
|
||||
}
|
||||
else if (arith.is_times_minus_one(a0, x)) {
|
||||
e1 = a1;
|
||||
e2 = x;
|
||||
}
|
||||
}
|
||||
equiv.merge(e1, e2);
|
||||
}
|
||||
else {
|
||||
if (j < i) {v[j] = v.get(i);}
|
||||
j++;
|
||||
}
|
||||
}
|
||||
v.shrink(j);
|
||||
}
|
||||
|
||||
/**
|
||||
* converts equivalence classes to equalities
|
||||
*/
|
||||
inline void equiv_to_expr(expr_equiv_class &equiv, expr_ref_vector &out) {
|
||||
ast_manager &m = out.get_manager();
|
||||
for (auto eq_class : equiv) {
|
||||
expr *rep = nullptr;
|
||||
for (expr *elem : eq_class) {
|
||||
if (!m.is_value (elem)) {
|
||||
rep = elem;
|
||||
break;
|
||||
}
|
||||
}
|
||||
SASSERT(rep);
|
||||
for (expr *elem : eq_class) {
|
||||
if (rep != elem) {out.push_back (m.mk_eq (rep, elem));}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* expands equivalence classes to all derivable equalities
|
||||
*/
|
||||
inline bool equiv_to_expr_full(expr_equiv_class &equiv, expr_ref_vector &out) {
|
||||
ast_manager &m = out.get_manager();
|
||||
bool dirty = false;
|
||||
for (auto eq_class : equiv) {
|
||||
for (auto a = eq_class.begin(), end = eq_class.end(); a != end; ++a) {
|
||||
expr_equiv_class::iterator b(a);
|
||||
for (++b; b != end; ++b) {
|
||||
out.push_back(m.mk_eq(*a, *b));
|
||||
dirty = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return dirty;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
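Note: the interaction of factor_eqs and equiv_to_expr is easy to miss in the template code. The hypothetical Python sketch below (not part of the change; terms modelled as plain strings) mirrors the idea: equalities are folded into union-find classes, and each class is re-emitted as equalities against one representative.

# Hypothetical Python sketch of the obj_equiv_class idea (union-find over terms).
class EquivClasses:
    def __init__(self):
        self.parent = {}

    def _find(self, x):
        self.parent.setdefault(x, x)
        while self.parent[x] != x:
            self.parent[x] = self.parent[self.parent[x]]   # path halving
            x = self.parent[x]
        return x

    def merge(self, a, b):
        self.parent[self._find(a)] = self._find(b)

    def are_equiv(self, a, b):
        return self._find(a) == self._find(b)

    def classes(self):
        cls = {}
        for x in self.parent:
            cls.setdefault(self._find(x), []).append(x)
        return cls.values()

# factor_eqs: pull equalities out of a conjunction into equivalence classes
lits = [('=', 'x', 'y'), ('=', 'y', 'z'), ('<', 'a', 'b')]
eqc, rest = EquivClasses(), []
for lit in lits:
    if lit[0] == '=':
        eqc.merge(lit[1], lit[2])
    else:
        rest.append(lit)

# equiv_to_expr: re-emit one equality per class member against a representative
out = [('=', cls[0], e) for cls in eqc.classes() for e in cls[1:]]
print(out, rest)      # [('=', 'x', 'y'), ('=', 'x', 'z')] and [('<', 'a', 'b')]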
|
459
src/muz/spacer/spacer_antiunify.cpp
Normal file
|
@ -0,0 +1,459 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_antiunify.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Antiunification utilities
|
||||
|
||||
Author:
|
||||
|
||||
Bernhard Gleiss
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include"muz/spacer/spacer_antiunify.h"
|
||||
#include"ast/ast.h"
|
||||
#include"ast/rewriter/rewriter.h"
|
||||
#include"ast/rewriter/rewriter_def.h"
|
||||
#include"ast/arith_decl_plugin.h"
|
||||
#include"ast/ast_util.h"
|
||||
#include"ast/expr_abstract.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
// Abstracts numeric values by variables
|
||||
struct var_abs_rewriter : public default_rewriter_cfg {
|
||||
ast_manager &m;
|
||||
arith_util m_util;
|
||||
ast_mark m_seen;
|
||||
ast_mark m_has_num;
|
||||
unsigned m_var_index;
|
||||
expr_ref_vector m_pinned;
|
||||
obj_map<expr, expr*>& m_substitution;
|
||||
ptr_vector<expr> m_stack;
|
||||
|
||||
var_abs_rewriter (ast_manager &manager, obj_map<expr, expr*>& substitution,
|
||||
unsigned k = 0) :
|
||||
m(manager), m_util(m), m_var_index(k),
|
||||
m_pinned(m), m_substitution(substitution) {}
|
||||
|
||||
void reset(unsigned k = 0) {
|
||||
m_pinned.reset();
|
||||
m_var_index = k;
|
||||
}
|
||||
|
||||
bool pre_visit(expr * t) {
|
||||
bool r = (!m_seen.is_marked(t) || m_has_num.is_marked(t));
|
||||
// only unify if convex closure will not contain non-linear multiplication
|
||||
if (m_util.is_mul(t))
|
||||
{
|
||||
bool contains_const_child = false;
|
||||
app* a = to_app(t);
|
||||
for (unsigned i=0, sz = a->get_num_args(); i < sz; ++i) {
|
||||
if (m_util.is_numeral(a->get_arg(i))) {
|
||||
contains_const_child = true;
|
||||
}
|
||||
}
|
||||
if (!contains_const_child) {r = false;}
|
||||
}
|
||||
if (r) {m_stack.push_back (t);}
|
||||
return r;
|
||||
}
|
||||
|
||||
|
||||
br_status reduce_app (func_decl * f, unsigned num, expr * const * args,
|
||||
expr_ref & result, proof_ref & result_pr) {
|
||||
expr *s;
|
||||
s = m_stack.back();
|
||||
m_stack.pop_back();
|
||||
if (is_app(s)) {
|
||||
app *a = to_app(s);
|
||||
for (unsigned i=0, sz = a->get_num_args(); i < sz; ++i) {
|
||||
if (m_has_num.is_marked(a->get_arg(i))) {
|
||||
m_has_num.mark(a,true);
|
||||
return BR_FAILED;
|
||||
}
|
||||
}
|
||||
}
|
||||
return BR_FAILED;
|
||||
}
|
||||
|
||||
bool cache_all_results() const { return false; }
|
||||
bool cache_results() const { return false; }
|
||||
|
||||
bool get_subst(expr * s, expr * & t, proof * & t_pr) {
|
||||
if (m_util.is_numeral(s)) {
|
||||
t = m.mk_var(m_var_index++, m.get_sort(s));
|
||||
m_substitution.insert(t, s);
|
||||
m_pinned.push_back(t);
|
||||
m_has_num.mark(s, true);
|
||||
m_seen.mark(t, true);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
/*
|
||||
* construct m_g, which is a generalization of t where every constant
* is replaced by a variable; for every variable in m_g, remember the
* substitution needed to get back t and save it in m_substitutions
|
||||
*/
|
||||
anti_unifier::anti_unifier(expr* t, ast_manager& man) : m(man), m_pinned(m), m_g(m)
|
||||
{
|
||||
m_pinned.push_back(t);
|
||||
|
||||
obj_map<expr, expr*> substitution;
|
||||
|
||||
var_abs_rewriter var_abs_cfg(m, substitution);
|
||||
rewriter_tpl<var_abs_rewriter> var_abs_rw (m, false, var_abs_cfg);
|
||||
var_abs_rw (t, m_g);
|
||||
|
||||
m_substitutions.push_back(substitution); //TODO: refactor into vector, remove k
|
||||
}
|
||||
|
||||
/* traverses m_g and t in parallel. if they only differ in constants
|
||||
* (i.e. m_g contains a variable, where t contains a constant), then
|
||||
* add the substitutions, which need to be applied to m_g to get t, to
|
||||
* m_substitutions.
|
||||
*/
|
||||
bool anti_unifier::add_term(expr* t) {
|
||||
m_pinned.push_back(t);
|
||||
|
||||
ptr_vector<expr> todo;
|
||||
ptr_vector<expr> todo2;
|
||||
todo.push_back(m_g);
|
||||
todo2.push_back(t);
|
||||
|
||||
ast_mark visited;
|
||||
|
||||
arith_util util(m);
|
||||
|
||||
obj_map<expr, expr*> substitution;
|
||||
|
||||
while (!todo.empty()) {
|
||||
expr* current = todo.back();
|
||||
todo.pop_back();
|
||||
expr* current2 = todo2.back();
|
||||
todo2.pop_back();
|
||||
|
||||
if (!visited.is_marked(current)) {
|
||||
visited.mark(current, true);
|
||||
|
||||
if (is_var(current)) {
|
||||
// TODO: for now we don't allow variables in the terms we want to antiunify
|
||||
SASSERT(m_substitutions[0].contains(current));
|
||||
if (util.is_numeral(current2)) {
|
||||
substitution.insert(current, current2);
|
||||
}
|
||||
else {return false;}
|
||||
}
|
||||
else {
|
||||
SASSERT(is_app(current));
|
||||
|
||||
if (is_app(current2) &&
|
||||
to_app(current)->get_decl() == to_app(current2)->get_decl() &&
|
||||
to_app(current)->get_num_args() == to_app(current2)->get_num_args()) {
|
||||
// TODO: what to do for numerals here? E.g. if we
|
||||
// have 1 and 2, do they have the same decl or are
|
||||
// the decls already different?
|
||||
SASSERT (!util.is_numeral(current) || current == current2);
|
||||
for (unsigned i = 0, num_args = to_app(current)->get_num_args();
|
||||
i < num_args; ++i) {
|
||||
todo.push_back(to_app(current)->get_arg(i));
|
||||
todo2.push_back(to_app(current2)->get_arg(i));
|
||||
}
|
||||
}
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// we now know that the terms can be anti-unified, so add the cached substitution
|
||||
m_substitutions.push_back(substitution);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* returns m_g, where additionally any variable, which has only equal
|
||||
* substitutions, is substituted with that substitution
|
||||
*/
|
||||
void anti_unifier::finalize() {
|
||||
ptr_vector<expr> todo;
|
||||
todo.push_back(m_g);
|
||||
|
||||
ast_mark visited;
|
||||
|
||||
obj_map<expr, expr*> generalization;
|
||||
|
||||
arith_util util(m);
|
||||
|
||||
// post-order traversel which ignores constants and handles them
|
||||
// directly when the enclosing term of the constant is handled
|
||||
while (!todo.empty()) {
|
||||
expr* current = todo.back();
|
||||
SASSERT(is_app(current));
|
||||
|
||||
// if we haven't already visited current
|
||||
if (!visited.is_marked(current)) {
|
||||
bool existsUnvisitedParent = false;
|
||||
|
||||
for (unsigned i = 0, sz = to_app(current)->get_num_args(); i < sz; ++i) {
|
||||
expr* argument = to_app(current)->get_arg(i);
|
||||
|
||||
if (!is_var(argument)) {
|
||||
SASSERT(is_app(argument));
|
||||
// if we haven't visited the current parent yet
|
||||
if(!visited.is_marked(argument)) {
|
||||
// add it to the stack
|
||||
todo.push_back(argument);
|
||||
existsUnvisitedParent = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if we already visited all parents, we can visit current too
|
||||
if (!existsUnvisitedParent) {
|
||||
visited.mark(current, true);
|
||||
todo.pop_back();
|
||||
|
||||
ptr_buffer<expr> arg_list;
|
||||
for (unsigned i = 0, num_args = to_app(current)->get_num_args();
|
||||
i < num_args; ++i) {
|
||||
expr* argument = to_app(current)->get_arg(i);
|
||||
|
||||
if (is_var(argument)) {
|
||||
// compute whether there are different
|
||||
// substitutions for argument
|
||||
bool containsDifferentSubstitutions = false;
|
||||
|
||||
for (unsigned i=0, sz = m_substitutions.size(); i+1 < sz; ++i) {
|
||||
SASSERT(m_substitutions[i].contains(argument));
|
||||
SASSERT(m_substitutions[i+1].contains(argument));
|
||||
|
||||
// TODO: how to check equality?
|
||||
if (m_substitutions[i][argument] !=
|
||||
m_substitutions[i+1][argument])
|
||||
{
|
||||
containsDifferentSubstitutions = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// if yes, use the variable
|
||||
if (containsDifferentSubstitutions) {
|
||||
arg_list.push_back(argument);
|
||||
}
|
||||
// otherwise use the concrete value instead
|
||||
// and remove the substitutions
|
||||
else
|
||||
{
|
||||
arg_list.push_back(m_substitutions[0][argument]);
|
||||
|
||||
for (unsigned i=0, sz = m_substitutions.size(); i < sz; ++i) {
|
||||
SASSERT(m_substitutions[i].contains(argument));
|
||||
m_substitutions[i].remove(argument);
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
SASSERT(generalization.contains(argument));
|
||||
arg_list.push_back(generalization[argument]);
|
||||
}
|
||||
}
|
||||
|
||||
SASSERT(to_app(current)->get_num_args() == arg_list.size());
|
||||
expr_ref application(m.mk_app(to_app(current)->get_decl(),
|
||||
to_app(current)->get_num_args(),
|
||||
arg_list.c_ptr()), m);
|
||||
m_pinned.push_back(application);
|
||||
generalization.insert(current, application);
|
||||
}
|
||||
}
|
||||
else {
|
||||
todo.pop_back();
|
||||
}
|
||||
}
|
||||
|
||||
m_g = generalization[m_g];
|
||||
}
|
||||
|
||||
|
||||
class ncc_less_than_key
|
||||
{
|
||||
public:
|
||||
ncc_less_than_key(arith_util& util) : m_util(util) {}
|
||||
|
||||
bool operator() (const expr*& e1, const expr*& e2) {
|
||||
rational val1;
|
||||
rational val2;
|
||||
|
||||
if (m_util.is_numeral(e1, val1) && m_util.is_numeral(e2, val2))
|
||||
{
|
||||
return val1 < val2;
|
||||
}
|
||||
else
|
||||
{
|
||||
SASSERT(false);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
arith_util m_util;
|
||||
};
|
||||
|
||||
/*
|
||||
* if there is a single interval which exactly captures each of the
|
||||
* substitutions, return the corresponding closure, otherwise do
|
||||
* nothing
|
||||
*/
|
||||
bool naive_convex_closure::compute_closure(anti_unifier& au, ast_manager& m,
|
||||
expr_ref& result) {
|
||||
arith_util util(m);
|
||||
|
||||
SASSERT(au.get_num_substitutions() > 0);
|
||||
if (au.get_substitution(0).size() == 0) {
|
||||
result = au.get_generalization();
|
||||
return true;
|
||||
}
|
||||
|
||||
// check that all substitutions have the same size
|
||||
for (unsigned i=0, sz = au.get_num_substitutions(); i+1 < sz; ++i) {
|
||||
if (au.get_substitution(i).size() != au.get_substitution(i+1).size()) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// for each substitution entry
|
||||
bool is_first_key = true;
|
||||
unsigned lower_bound;
|
||||
unsigned upper_bound;
|
||||
for (const auto& pair : au.get_substitution(0)) {
|
||||
// construct vector
|
||||
expr* key = &pair.get_key();
|
||||
vector<unsigned> entries;
|
||||
|
||||
rational val;
|
||||
for (unsigned i=0, sz = au.get_num_substitutions(); i < sz; ++i)
|
||||
{
|
||||
if (util.is_numeral(au.get_substitution(i)[key], val) &&
|
||||
val.is_unsigned()) {
|
||||
entries.push_back(val.get_unsigned());
|
||||
}
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// check whether vector represents interval
|
||||
unsigned current_lower_bound;
|
||||
unsigned current_upper_bound;
|
||||
|
||||
// if vector represents interval
|
||||
if (get_range(entries, current_lower_bound, current_upper_bound)) {
|
||||
// if interval is the same as previous interval
|
||||
if (is_first_key) {
|
||||
is_first_key = false;
|
||||
lower_bound = current_lower_bound;
|
||||
upper_bound = current_upper_bound;
|
||||
}
|
||||
else {
|
||||
if (current_lower_bound != lower_bound ||
|
||||
current_upper_bound != upper_bound) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
// otherwise we don't do a convex closure
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// we finally know that we can express the substitutions using a
|
||||
// single interval, so build the expression 1. construct const
|
||||
expr_ref const_ref(m.mk_const(symbol("scti!0"), util.mk_int()), m);
|
||||
|
||||
// 2. construct body with const
|
||||
expr_ref lit1(util.mk_le(util.mk_int(lower_bound), const_ref), m);
|
||||
expr_ref lit2(util.mk_le(const_ref, util.mk_int(upper_bound)), m);
|
||||
expr_ref lit3(m);
|
||||
substitute_vars_by_const(m, au.get_generalization(), const_ref, lit3);
|
||||
|
||||
expr_ref_vector args(m);
|
||||
args.push_back(lit1);
|
||||
args.push_back(lit2);
|
||||
args.push_back(lit3);
|
||||
expr_ref body_with_consts = mk_and(args);
|
||||
|
||||
// 3. replace const by var
|
||||
ptr_vector<expr> vars;
|
||||
vars.push_back(const_ref);
|
||||
|
||||
expr_ref body(m);
|
||||
expr_abstract(m, 0, vars.size(), (expr*const*)vars.c_ptr(), body_with_consts, body);
|
||||
|
||||
// 4. introduce quantifier
|
||||
ptr_vector<sort> sorts;
|
||||
sorts.push_back(util.mk_int());
|
||||
svector<symbol> names;
|
||||
names.push_back(symbol("scti!0"));
|
||||
|
||||
result = expr_ref(m.mk_exists(vars.size(), sorts.c_ptr(), names.c_ptr(), body),m);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool naive_convex_closure::get_range(vector<unsigned int>& v,
|
||||
unsigned int& lower_bound, unsigned int& upper_bound)
|
||||
{
|
||||
// sort substitutions
|
||||
std::sort(v.begin(), v.end());
|
||||
|
||||
// check that numbers are consecutive
|
||||
for (unsigned i=0; i+1 < v.size(); ++i) {
|
||||
if (v[i] + 1 != v[i+1]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
SASSERT(v.size() > 0);
|
||||
lower_bound = v[0];
|
||||
upper_bound = v.back();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
struct subs_rewriter_cfg : public default_rewriter_cfg {
|
||||
ast_manager &m;
|
||||
expr_ref m_c;
|
||||
|
||||
subs_rewriter_cfg (ast_manager &manager, expr* c) : m(manager), m_c(c, m) {}
|
||||
|
||||
bool reduce_var(var * t, expr_ref & result, proof_ref & result_pr) {
|
||||
result = m_c;
|
||||
result_pr = 0;
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
void naive_convex_closure::substitute_vars_by_const(ast_manager& m, expr* t,
|
||||
expr* c, expr_ref& res) {
|
||||
subs_rewriter_cfg subs_cfg(m, c);
|
||||
rewriter_tpl<subs_rewriter_cfg> subs_rw (m, false, subs_cfg);
|
||||
subs_rw (t, res);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
template class rewriter_tpl<spacer::var_abs_rewriter>;
|
||||
template class rewriter_tpl<spacer::subs_rewriter_cfg>;
|
67
src/muz/spacer/spacer_antiunify.h
Normal file
67
src/muz/spacer/spacer_antiunify.h
Normal file
|
@ -0,0 +1,67 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_antiunify.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Antiunification utilities
|
||||
|
||||
Author:
|
||||
|
||||
Bernhard Gleiss
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_ANTIUNIFY_H_
|
||||
#define _SPACER_ANTIUNIFY_H_
|
||||
|
||||
#include "ast/ast.h"
|
||||
|
||||
namespace spacer {
|
||||
class anti_unifier
|
||||
{
|
||||
public:
|
||||
anti_unifier(expr* t, ast_manager& m);
|
||||
~anti_unifier() {}
|
||||
|
||||
bool add_term(expr* t);
|
||||
void finalize();
|
||||
|
||||
expr* get_generalization() {return m_g;}
|
||||
unsigned get_num_substitutions() {return m_substitutions.size();}
|
||||
obj_map<expr, expr*> get_substitution(unsigned index){
|
||||
SASSERT(index < m_substitutions.size());
|
||||
return m_substitutions[index];
|
||||
}
|
||||
|
||||
private:
|
||||
ast_manager& m;
|
||||
// tracking all created expressions
|
||||
expr_ref_vector m_pinned;
|
||||
|
||||
expr_ref m_g;
|
||||
|
||||
vector<obj_map<expr, expr*>> m_substitutions;
|
||||
};
|
||||
|
||||
class naive_convex_closure
|
||||
{
|
||||
public:
|
||||
static bool compute_closure(anti_unifier& au, ast_manager& m,
|
||||
expr_ref& result);
|
||||
|
||||
private:
|
||||
static bool get_range(vector<unsigned>& v, unsigned& lower_bound,
|
||||
unsigned& upper_bound);
|
||||
static void substitute_vars_by_const(ast_manager& m, expr* t, expr* c,
|
||||
expr_ref& res);
|
||||
};
|
||||
|
||||
}
|
||||
#endif
|
3503
src/muz/spacer/spacer_context.cpp
Normal file
3503
src/muz/spacer/spacer_context.cpp
Normal file
File diff suppressed because it is too large
Load diff
840
src/muz/spacer/spacer_context.h
Normal file
840
src/muz/spacer/spacer_context.h
Normal file
|
@ -0,0 +1,840 @@
|
|||
/**++
|
||||
Copyright (c) 2017 Microsoft Corporation and Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_context.h
|
||||
|
||||
Abstract:
|
||||
|
||||
SPACER predicate transformers and search context.
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
Anvesh Komuravelli
|
||||
|
||||
Based on muz/pdr/pdr_context.h by Nikolaj Bjorner (nbjorner)
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_CONTEXT_H_
|
||||
#define _SPACER_CONTEXT_H_
|
||||
|
||||
#ifdef _CYGWIN
|
||||
#undef min
|
||||
#undef max
|
||||
#endif
|
||||
#include <queue>
|
||||
#include "spacer_manager.h"
|
||||
#include "spacer_prop_solver.h"
|
||||
#include "fixedpoint_params.hpp"
|
||||
|
||||
namespace datalog {
|
||||
class rule_set;
|
||||
class context;
|
||||
};
|
||||
|
||||
namespace spacer {
|
||||
|
||||
class pred_transformer;
|
||||
class derivation;
|
||||
class pob_queue;
|
||||
class context;
|
||||
|
||||
typedef obj_map<datalog::rule const, app_ref_vector*> rule2inst;
|
||||
typedef obj_map<func_decl, pred_transformer*> decl2rel;
|
||||
|
||||
class pob;
|
||||
typedef ref<pob> pob_ref;
|
||||
typedef sref_vector<pob> pob_ref_vector;
|
||||
|
||||
class reach_fact;
|
||||
typedef ref<reach_fact> reach_fact_ref;
|
||||
typedef sref_vector<reach_fact> reach_fact_ref_vector;
|
||||
|
||||
class reach_fact {
|
||||
unsigned m_ref_count;
|
||||
|
||||
expr_ref m_fact;
|
||||
ptr_vector<app> m_aux_vars;
|
||||
|
||||
const datalog::rule &m_rule;
|
||||
reach_fact_ref_vector m_justification;
|
||||
|
||||
bool m_init;
|
||||
|
||||
public:
|
||||
reach_fact (ast_manager &m, const datalog::rule &rule,
|
||||
expr* fact, const ptr_vector<app> &aux_vars,
|
||||
bool init = false) :
|
||||
m_ref_count (0), m_fact (fact, m), m_aux_vars (aux_vars),
|
||||
m_rule(rule), m_init (init) {}
|
||||
reach_fact (ast_manager &m, const datalog::rule &rule,
|
||||
expr* fact, bool init = false) :
|
||||
m_ref_count (0), m_fact (fact, m), m_rule(rule), m_init (init) {}
|
||||
|
||||
bool is_init () {return m_init;}
|
||||
const datalog::rule& get_rule () {return m_rule;}
|
||||
|
||||
void add_justification (reach_fact *f) {m_justification.push_back (f);}
|
||||
const reach_fact_ref_vector& get_justifications () {return m_justification;}
|
||||
|
||||
expr *get () {return m_fact.get ();}
|
||||
const ptr_vector<app> &aux_vars () {return m_aux_vars;}
|
||||
|
||||
void inc_ref () {++m_ref_count;}
|
||||
void dec_ref ()
|
||||
{
|
||||
SASSERT (m_ref_count > 0);
|
||||
--m_ref_count;
|
||||
if(m_ref_count == 0) { dealloc(this); }
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
class lemma;
|
||||
typedef ref<lemma> lemma_ref;
|
||||
typedef sref_vector<lemma> lemma_ref_vector;
|
||||
|
||||
typedef pob pob;
|
||||
|
||||
// a lemma
|
||||
class lemma {
|
||||
unsigned m_ref_count;
|
||||
|
||||
ast_manager &m;
|
||||
expr_ref m_body;
|
||||
expr_ref_vector m_cube;
|
||||
app_ref_vector m_bindings;
|
||||
unsigned m_lvl;
|
||||
pob_ref m_pob;
|
||||
bool m_new_pob;
|
||||
|
||||
void mk_expr_core();
|
||||
void mk_cube_core();
|
||||
public:
|
||||
lemma(ast_manager &manager, expr * fml, unsigned lvl);
|
||||
lemma(pob_ref const &p);
|
||||
lemma(pob_ref const &p, expr_ref_vector &cube, unsigned lvl);
|
||||
lemma(const lemma &other) = delete;
|
||||
|
||||
ast_manager &get_ast_manager() {return m;}
|
||||
expr *get_expr();
|
||||
bool is_false();
|
||||
expr_ref_vector const &get_cube();
|
||||
void update_cube(pob_ref const &p, expr_ref_vector &cube);
|
||||
|
||||
bool has_pob() {return m_pob;}
|
||||
pob_ref &get_pob() {return m_pob;}
|
||||
|
||||
unsigned level () const {return m_lvl;}
|
||||
void set_level (unsigned lvl) {m_lvl = lvl;}
|
||||
app_ref_vector& get_bindings() {return m_bindings;}
|
||||
void add_binding(app_ref_vector const &binding) {m_bindings.append(binding);}
|
||||
void mk_insts(expr_ref_vector& inst, expr* e = nullptr);
|
||||
bool is_ground () {return !is_quantifier (get_expr());}
|
||||
|
||||
void inc_ref () {++m_ref_count;}
|
||||
void dec_ref ()
|
||||
{
|
||||
SASSERT (m_ref_count > 0);
|
||||
--m_ref_count;
|
||||
if(m_ref_count == 0) { dealloc(this); }
|
||||
}
|
||||
};
|
||||
|
||||
struct lemma_lt_proc : public std::binary_function<lemma*, lemma *, bool> {
|
||||
bool operator() (lemma *a, lemma *b) {
|
||||
return (a->level () < b->level ()) ||
|
||||
(a->level () == b->level () &&
|
||||
ast_lt_proc() (a->get_expr (), b->get_expr ()));
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
//
|
||||
// Predicate transformer state.
|
||||
// A predicate transformer corresponds to the
|
||||
// set of rules that have the same head predicates.
|
||||
//
|
||||
|
||||
class pred_transformer {
|
||||
|
||||
struct stats {
|
||||
unsigned m_num_propagations;
|
||||
unsigned m_num_invariants;
|
||||
stats() { reset(); }
|
||||
void reset() { memset(this, 0, sizeof(*this)); }
|
||||
};
|
||||
|
||||
/// manager of the lemmas in all the frames
|
||||
#include "spacer_legacy_frames.h"
|
||||
class frames {
|
||||
private:
|
||||
pred_transformer &m_pt;
|
||||
lemma_ref_vector m_lemmas;
|
||||
unsigned m_size;
|
||||
|
||||
bool m_sorted;
|
||||
lemma_lt_proc m_lt;
|
||||
|
||||
void sort ();
|
||||
|
||||
public:
|
||||
frames (pred_transformer &pt) : m_pt (pt), m_size(0), m_sorted (true) {}
|
||||
~frames() {}
|
||||
void simplify_formulas ();
|
||||
|
||||
pred_transformer& pt () {return m_pt;}
|
||||
|
||||
|
||||
void get_frame_lemmas (unsigned level, expr_ref_vector &out) {
|
||||
for (unsigned i = 0, sz = m_lemmas.size (); i < sz; ++i)
|
||||
if(m_lemmas[i]->level() == level) {
|
||||
out.push_back(m_lemmas[i]->get_expr());
|
||||
}
|
||||
}
|
||||
void get_frame_geq_lemmas (unsigned level, expr_ref_vector &out) {
|
||||
for (unsigned i = 0, sz = m_lemmas.size (); i < sz; ++i)
|
||||
if(m_lemmas [i]->level() >= level) {
|
||||
out.push_back(m_lemmas[i]->get_expr());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
unsigned size () const {return m_size;}
|
||||
unsigned lemma_size () const {return m_lemmas.size ();}
|
||||
void add_frame () {m_size++;}
|
||||
void inherit_frames (frames &other) {
|
||||
for (unsigned i = 0, sz = other.m_lemmas.size (); i < sz; ++i) {
|
||||
lemma_ref lem = alloc(lemma, m_pt.get_ast_manager(),
|
||||
other.m_lemmas[i]->get_expr (),
|
||||
other.m_lemmas[i]->level());
|
||||
lem->add_binding(other.m_lemmas[i]->get_bindings());
|
||||
add_lemma(lem.get());
|
||||
}
|
||||
m_sorted = false;
|
||||
}
|
||||
|
||||
bool add_lemma (lemma *lem);
|
||||
void propagate_to_infinity (unsigned level);
|
||||
bool propagate_to_next_level (unsigned level);
|
||||
|
||||
|
||||
};
|
||||
|
||||
/**
|
||||
manager of proof-obligations (pobs)
|
||||
*/
|
||||
class pobs {
|
||||
typedef ptr_buffer<pob, 1> pob_buffer;
|
||||
typedef obj_map<expr, pob_buffer > expr2pob_buffer;
|
||||
|
||||
pred_transformer &m_pt;
|
||||
|
||||
expr2pob_buffer m_pobs;
|
||||
pob_ref_vector m_pinned;
|
||||
public:
|
||||
pobs(pred_transformer &pt) : m_pt(pt) {}
|
||||
pob* mk_pob(pob *parent, unsigned level, unsigned depth,
|
||||
expr *post, app_ref_vector const &b);
|
||||
|
||||
pob* mk_pob(pob *parent, unsigned level, unsigned depth,
|
||||
expr *post) {
|
||||
app_ref_vector b(m_pt.get_ast_manager());
|
||||
return mk_pob (parent, level, depth, post, b);
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
typedef obj_map<datalog::rule const, expr*> rule2expr;
|
||||
typedef obj_map<datalog::rule const, ptr_vector<app> > rule2apps;
|
||||
|
||||
manager& pm; // spacer-manager
|
||||
ast_manager& m; // manager
|
||||
context& ctx;
|
||||
|
||||
func_decl_ref m_head; // predicate
|
||||
func_decl_ref_vector m_sig; // signature
|
||||
ptr_vector<pred_transformer> m_use; // places where 'this' is referenced.
|
||||
ptr_vector<datalog::rule> m_rules; // rules used to derive transformer
|
||||
prop_solver m_solver; // solver context
|
||||
solver* m_reach_ctx; // context for reachability facts
|
||||
pobs m_pobs;
|
||||
frames m_frames;
|
||||
reach_fact_ref_vector m_reach_facts; // reach facts
|
||||
/// Number of initial reachability facts
|
||||
unsigned m_rf_init_sz;
|
||||
obj_map<expr, datalog::rule const*> m_tag2rule; // map tag predicate to rule.
|
||||
rule2expr m_rule2tag; // map rule to predicate tag.
|
||||
rule2inst m_rule2inst; // map rules to instantiations of indices
|
||||
rule2expr m_rule2transition; // map rules to transition
|
||||
rule2apps m_rule2vars; // map rule to auxiliary variables
|
||||
expr_ref m_transition; // transition relation.
|
||||
expr_ref m_initial_state; // initial state.
|
||||
app_ref m_extend_lit; // literal to extend initial state
|
||||
bool m_all_init; // true if the pt has no uninterpreted body in any rule
|
||||
ptr_vector<func_decl> m_predicates;
|
||||
stats m_stats;
|
||||
stopwatch m_initialize_watch;
|
||||
stopwatch m_must_reachable_watch;
|
||||
|
||||
|
||||
|
||||
/// Auxiliary variables to represent different disjunctive
|
||||
/// cases of must summaries. Stored over 'n' (a.k.a. new)
|
||||
/// versions of the variables
|
||||
expr_ref_vector m_reach_case_vars;
|
||||
|
||||
void init_sig();
|
||||
void ensure_level(unsigned level);
|
||||
void add_lemma_core (lemma *lemma);
|
||||
void add_lemma_from_child (pred_transformer &child, lemma *lemma, unsigned lvl);
|
||||
|
||||
void mk_assumptions(func_decl* head, expr* fml, expr_ref_vector& result);
|
||||
|
||||
// Initialization
|
||||
void init_rules(decl2rel const& pts, expr_ref& init, expr_ref& transition);
|
||||
void init_rule(decl2rel const& pts, datalog::rule const& rule, vector<bool>& is_init,
|
||||
ptr_vector<datalog::rule const>& rules, expr_ref_vector& transition);
|
||||
void init_atom(decl2rel const& pts, app * atom, app_ref_vector& var_reprs, expr_ref_vector& conj, unsigned tail_idx);
|
||||
|
||||
void simplify_formulas(tactic& tac, expr_ref_vector& fmls);
|
||||
|
||||
// Debugging
|
||||
bool check_filled(app_ref_vector const& v) const;
|
||||
|
||||
void add_premises(decl2rel const& pts, unsigned lvl, datalog::rule& rule, expr_ref_vector& r);
|
||||
|
||||
expr* mk_fresh_reach_case_var ();
|
||||
|
||||
public:
|
||||
pred_transformer(context& ctx, manager& pm, func_decl* head);
|
||||
~pred_transformer();
|
||||
|
||||
inline bool use_native_mbp ();
|
||||
reach_fact *get_reach_fact (expr *v)
|
||||
{
|
||||
for (unsigned i = 0, sz = m_reach_facts.size (); i < sz; ++i)
|
||||
if(v == m_reach_facts [i]->get()) { return m_reach_facts[i]; }
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void add_rule(datalog::rule* r) { m_rules.push_back(r); }
|
||||
void add_use(pred_transformer* pt) { if(!m_use.contains(pt)) { m_use.insert(pt); } }
|
||||
void initialize(decl2rel const& pts);
|
||||
|
||||
func_decl* head() const { return m_head; }
|
||||
ptr_vector<datalog::rule> const& rules() const { return m_rules; }
|
||||
func_decl* sig(unsigned i) const { return m_sig[i]; } // signature
|
||||
func_decl* const* sig() { return m_sig.c_ptr(); }
|
||||
unsigned sig_size() const { return m_sig.size(); }
|
||||
expr* transition() const { return m_transition; }
|
||||
expr* initial_state() const { return m_initial_state; }
|
||||
expr* rule2tag(datalog::rule const* r) { return m_rule2tag.find(r); }
|
||||
unsigned get_num_levels() { return m_frames.size (); }
|
||||
expr_ref get_cover_delta(func_decl* p_orig, int level);
|
||||
void add_cover(unsigned level, expr* property);
|
||||
expr_ref get_reachable ();
|
||||
|
||||
std::ostream& display(std::ostream& strm) const;
|
||||
|
||||
void collect_statistics(statistics& st) const;
|
||||
void reset_statistics();
|
||||
|
||||
bool is_must_reachable (expr* state, model_ref* model = 0);
|
||||
/// \brief Returns reachability fact active in the given model
|
||||
/// all determines whether initial reachability facts are included as well
|
||||
reach_fact *get_used_reach_fact (model_evaluator_util& mev, bool all = true);
|
||||
/// \brief Returns reachability fact active in the origin of the given model
|
||||
reach_fact* get_used_origin_reach_fact (model_evaluator_util &mev, unsigned oidx);
|
||||
expr_ref get_origin_summary (model_evaluator_util &mev,
|
||||
unsigned level, unsigned oidx, bool must,
|
||||
const ptr_vector<app> **aux);
|
||||
|
||||
void remove_predecessors(expr_ref_vector& literals);
|
||||
void find_predecessors(datalog::rule const& r, ptr_vector<func_decl>& predicates) const;
|
||||
void find_predecessors(vector<std::pair<func_decl*, unsigned> >& predicates) const;
|
||||
datalog::rule const* find_rule(model &mev, bool& is_concrete,
|
||||
vector<bool>& reach_pred_used,
|
||||
unsigned& num_reuse_reach);
|
||||
expr* get_transition(datalog::rule const& r) { return m_rule2transition.find(&r); }
|
||||
ptr_vector<app>& get_aux_vars(datalog::rule const& r) { return m_rule2vars.find(&r); }
|
||||
|
||||
bool propagate_to_next_level(unsigned level);
|
||||
void propagate_to_infinity(unsigned level);
|
||||
/// \brief Add a lemma to the current context and all users
|
||||
bool add_lemma(expr * lemma, unsigned lvl);
|
||||
bool add_lemma(lemma* lem) {return m_frames.add_lemma(lem);}
|
||||
expr* get_reach_case_var (unsigned idx) const;
|
||||
bool has_reach_facts () const { return !m_reach_facts.empty () ;}
|
||||
|
||||
/// initialize reachability facts using initial rules
|
||||
void init_reach_facts ();
|
||||
void add_reach_fact (reach_fact *fact); // add reachability fact
|
||||
reach_fact* get_last_reach_fact () const { return m_reach_facts.back (); }
|
||||
expr* get_last_reach_case_var () const;
|
||||
|
||||
pob* mk_pob(pob *parent, unsigned level, unsigned depth,
|
||||
expr *post, app_ref_vector const &b){
|
||||
return m_pobs.mk_pob(parent, level, depth, post, b);
|
||||
}
|
||||
|
||||
pob* mk_pob(pob *parent, unsigned level, unsigned depth,
|
||||
expr *post) {
|
||||
return m_pobs.mk_pob(parent, level, depth, post);
|
||||
}
|
||||
|
||||
lbool is_reachable(pob& n, expr_ref_vector* core, model_ref *model,
|
||||
unsigned& uses_level, bool& is_concrete,
|
||||
datalog::rule const*& r,
|
||||
vector<bool>& reach_pred_used,
|
||||
unsigned& num_reuse_reach);
|
||||
bool is_invariant(unsigned level, expr* lemma,
|
||||
unsigned& solver_level, expr_ref_vector* core = 0);
|
||||
bool check_inductive(unsigned level, expr_ref_vector& state,
|
||||
unsigned& assumes_level);
|
||||
|
||||
expr_ref get_formulas(unsigned level, bool add_axioms);
|
||||
|
||||
void simplify_formulas();
|
||||
|
||||
context& get_context () const {return ctx;}
|
||||
manager& get_manager() const { return pm; }
|
||||
ast_manager& get_ast_manager() const { return m; }
|
||||
|
||||
void add_premises(decl2rel const& pts, unsigned lvl, expr_ref_vector& r);
|
||||
|
||||
void inherit_properties(pred_transformer& other);
|
||||
|
||||
void ground_free_vars(expr* e, app_ref_vector& vars, ptr_vector<app>& aux_vars,
|
||||
bool is_init);
|
||||
|
||||
/// \brief Adds a given expression to the set of initial rules
|
||||
app* extend_initial (expr *e);
|
||||
|
||||
/// \brief Returns true if the obligation is already blocked by current lemmas
|
||||
bool is_blocked (pob &n, unsigned &uses_level);
|
||||
/// \brief Returns true if the obligation is already blocked by current quantified lemmas
|
||||
bool is_qblocked (pob &n);
|
||||
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* A proof obligation.
|
||||
*/
|
||||
class pob {
|
||||
friend class context;
|
||||
unsigned m_ref_count;
|
||||
/// parent node
|
||||
pob_ref m_parent;
|
||||
/// predicate transformer
|
||||
pred_transformer& m_pt;
|
||||
/// post-condition decided by this node
|
||||
expr_ref m_post;
|
||||
// if m_post is not ground, then m_binding is an instantiation for
|
||||
// all quantified variables
|
||||
app_ref_vector m_binding;
|
||||
/// new post to be swapped in for m_post
|
||||
expr_ref m_new_post;
|
||||
/// level at which to decide the post
|
||||
unsigned m_level;
|
||||
|
||||
unsigned m_depth;
|
||||
|
||||
/// whether a concrete answer to the post is found
|
||||
bool m_open;
|
||||
/// whether to use farkas generalizer to construct a lemma blocking this node
|
||||
bool m_use_farkas;
|
||||
|
||||
unsigned m_weakness;
|
||||
/// derivation representing the position of this node in the parent's rule
|
||||
scoped_ptr<derivation> m_derivation;
|
||||
|
||||
ptr_vector<pob> m_kids;
|
||||
public:
|
||||
pob (pob* parent, pred_transformer& pt,
|
||||
unsigned level, unsigned depth=0, bool add_to_parent=true);
|
||||
|
||||
~pob() {if(m_parent) { m_parent->erase_child(*this); }}
|
||||
|
||||
unsigned weakness() {return m_weakness;}
|
||||
void bump_weakness() {m_weakness++;}
|
||||
void reset_weakness() {m_weakness=0;}
|
||||
|
||||
void inc_level () {m_level++; m_depth++;reset_weakness();}
|
||||
|
||||
void inherit(pob const &p);
|
||||
void set_derivation (derivation *d) {m_derivation = d;}
|
||||
bool has_derivation () const {return (bool)m_derivation;}
|
||||
derivation &get_derivation() const {return *m_derivation.get ();}
|
||||
void reset_derivation () {set_derivation (NULL);}
|
||||
/// detaches derivation from the node without deallocating
|
||||
derivation* detach_derivation () {return m_derivation.detach ();}
|
||||
|
||||
pob* parent () const { return m_parent.get (); }
|
||||
|
||||
pred_transformer& pt () const { return m_pt; }
|
||||
ast_manager& get_ast_manager () const { return m_pt.get_ast_manager (); }
|
||||
manager& get_manager () const { return m_pt.get_manager (); }
|
||||
context& get_context () const {return m_pt.get_context ();}
|
||||
|
||||
unsigned level () const { return m_level; }
|
||||
unsigned depth () const {return m_depth;}
|
||||
|
||||
bool use_farkas_generalizer () const {return m_use_farkas;}
|
||||
void set_farkas_generalizer (bool v) {m_use_farkas = v;}
|
||||
|
||||
expr* post() const { return m_post.get (); }
|
||||
void set_post(expr *post);
|
||||
void set_post(expr *post, app_ref_vector const &b);
|
||||
|
||||
/// indicate that a new post should be set for the node
|
||||
void new_post(expr *post) {if(post != m_post) {m_new_post = post;}}
|
||||
/// true if the node needs to be updated outside of the priority queue
|
||||
bool is_dirty () {return m_new_post;}
|
||||
/// clean a dirty node
|
||||
void clean();
|
||||
|
||||
void reset () {clean (); m_derivation = NULL; m_open = true;}
|
||||
|
||||
bool is_closed () const { return !m_open; }
|
||||
void close();
|
||||
|
||||
void add_child (pob &v) {m_kids.push_back (&v);}
|
||||
void erase_child (pob &v) {m_kids.erase (&v);}
|
||||
|
||||
bool is_ground () { return m_binding.empty (); }
|
||||
app_ref_vector const &get_binding() const {return m_binding;}
|
||||
/*
|
||||
* Return skolem variables that appear in post
|
||||
*/
|
||||
void get_skolems(app_ref_vector& v);
|
||||
|
||||
void inc_ref () {++m_ref_count;}
|
||||
void dec_ref ()
|
||||
{
|
||||
--m_ref_count;
|
||||
if(m_ref_count == 0) { dealloc(this); }
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
struct pob_lt :
|
||||
public std::binary_function<const pob*, const pob*, bool>
|
||||
{bool operator() (const pob *pn1, const pob *pn2) const;};
|
||||
|
||||
struct pob_gt :
|
||||
public std::binary_function<const pob*, const pob*, bool> {
|
||||
pob_lt lt;
|
||||
bool operator() (const pob *n1, const pob *n2) const
|
||||
{return lt(n2, n1);}
|
||||
};
|
||||
|
||||
struct pob_ref_gt :
|
||||
public std::binary_function<const pob_ref&, const model_ref &, bool> {
|
||||
pob_gt gt;
|
||||
bool operator() (const pob_ref &n1, const pob_ref &n2) const
|
||||
{return gt (n1.get (), n2.get ());}
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
*/
|
||||
class derivation {
|
||||
/// a single premise of a derivation
|
||||
class premise {
|
||||
pred_transformer &m_pt;
|
||||
/// origin order in the rule
|
||||
unsigned m_oidx;
|
||||
/// summary fact corresponding to the premise
|
||||
expr_ref m_summary;
|
||||
/// whether this is a must or may premise
|
||||
bool m_must;
|
||||
app_ref_vector m_ovars;
|
||||
|
||||
public:
|
||||
premise (pred_transformer &pt, unsigned oidx, expr *summary, bool must,
|
||||
const ptr_vector<app> *aux_vars = NULL);
|
||||
premise (const premise &p);
|
||||
|
||||
bool is_must () {return m_must;}
|
||||
expr * get_summary () {return m_summary.get ();}
|
||||
app_ref_vector &get_ovars () {return m_ovars;}
|
||||
unsigned get_oidx () {return m_oidx;}
|
||||
pred_transformer &pt () {return m_pt;}
|
||||
|
||||
/// \brief Updated the summary.
|
||||
/// The new summary is over n-variables.
|
||||
void set_summary (expr * summary, bool must,
|
||||
const ptr_vector<app> *aux_vars = NULL);
|
||||
};
|
||||
|
||||
|
||||
/// parent model node
|
||||
pob& m_parent;
|
||||
|
||||
/// the rule corresponding to this derivation
|
||||
const datalog::rule &m_rule;
|
||||
|
||||
/// the premises
|
||||
vector<premise> m_premises;
|
||||
/// pointer to the active premise
|
||||
unsigned m_active;
|
||||
// transition relation over origin variables
|
||||
expr_ref m_trans;
|
||||
// implicitly existentially quantified variables in m_trans
|
||||
app_ref_vector m_evars;
|
||||
/// -- create next child using given model as the guide
|
||||
/// -- returns NULL if there is no next child
|
||||
pob* create_next_child (model_evaluator_util &mev);
|
||||
public:
|
||||
derivation (pob& parent, datalog::rule const& rule,
|
||||
expr *trans, app_ref_vector const &evars);
|
||||
void add_premise (pred_transformer &pt, unsigned oidx,
|
||||
expr * summary, bool must, const ptr_vector<app> *aux_vars = NULL);
|
||||
|
||||
/// creates the first child. Must be called after all the premises
|
||||
/// are added. The model must be valid for the premises
|
||||
/// Returns NULL if no child exits
|
||||
pob *create_first_child (model_evaluator_util &mev);
|
||||
|
||||
/// Create the next child. Must summary of the currently active
|
||||
/// premise must be consistent with the transition relation
|
||||
pob *create_next_child ();
|
||||
|
||||
datalog::rule const& get_rule () const { return m_rule; }
|
||||
pob& get_parent () const { return m_parent; }
|
||||
ast_manager &get_ast_manager () const {return m_parent.get_ast_manager ();}
|
||||
manager &get_manager () const {return m_parent.get_manager ();}
|
||||
context &get_context() const {return m_parent.get_context();}
|
||||
};
|
||||
|
||||
|
||||
class pob_queue {
|
||||
pob_ref m_root;
|
||||
unsigned m_max_level;
|
||||
unsigned m_min_depth;
|
||||
|
||||
std::priority_queue<pob_ref, std::vector<pob_ref>,
|
||||
pob_ref_gt> m_obligations;
|
||||
|
||||
public:
|
||||
pob_queue(): m_root(NULL), m_max_level(0), m_min_depth(0) {}
|
||||
~pob_queue();
|
||||
|
||||
void reset();
|
||||
pob * top ();
|
||||
void pop () {m_obligations.pop ();}
|
||||
void push (pob &n) {m_obligations.push (&n);}
|
||||
|
||||
void inc_level ()
|
||||
{
|
||||
SASSERT (!m_obligations.empty () || m_root);
|
||||
m_max_level++;
|
||||
m_min_depth++;
|
||||
if(m_root && m_obligations.empty()) { m_obligations.push(m_root); }
|
||||
}
|
||||
|
||||
pob& get_root() const { return *m_root.get (); }
|
||||
void set_root(pob& n);
|
||||
bool is_root (pob& n) const {return m_root.get () == &n;}
|
||||
|
||||
unsigned max_level () {return m_max_level;}
|
||||
unsigned min_depth () {return m_min_depth;}
|
||||
unsigned size () {return m_obligations.size ();}
|
||||
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* Generalizers (strengthens) a lemma
|
||||
*/
|
||||
class lemma_generalizer {
|
||||
protected:
|
||||
context& m_ctx;
|
||||
public:
|
||||
lemma_generalizer(context& ctx): m_ctx(ctx) {}
|
||||
virtual ~lemma_generalizer() {}
|
||||
virtual void operator()(lemma_ref &lemma) = 0;
|
||||
virtual void collect_statistics(statistics& st) const {}
|
||||
virtual void reset_statistics() {}
|
||||
};
|
||||
|
||||
|
||||
class context {
|
||||
|
||||
struct stats {
|
||||
unsigned m_num_queries;
|
||||
unsigned m_num_reach_queries;
|
||||
unsigned m_num_reuse_reach;
|
||||
unsigned m_max_query_lvl;
|
||||
unsigned m_max_depth;
|
||||
unsigned m_cex_depth;
|
||||
unsigned m_expand_node_undef;
|
||||
unsigned m_num_lemmas;
|
||||
unsigned m_num_restarts;
|
||||
stats() { reset(); }
|
||||
void reset() { memset(this, 0, sizeof(*this)); }
|
||||
};
|
||||
|
||||
// stat watches
|
||||
stopwatch m_solve_watch;
|
||||
stopwatch m_propagate_watch;
|
||||
stopwatch m_reach_watch;
|
||||
stopwatch m_is_reach_watch;
|
||||
stopwatch m_create_children_watch;
|
||||
stopwatch m_init_rules_watch;
|
||||
|
||||
fixedpoint_params const& m_params;
|
||||
ast_manager& m;
|
||||
datalog::context* m_context;
|
||||
manager m_pm;
|
||||
decl2rel m_rels; // Map from relation predicate to fp-operator.
|
||||
func_decl_ref m_query_pred;
|
||||
pred_transformer* m_query;
|
||||
mutable pob_queue m_pob_queue;
|
||||
lbool m_last_result;
|
||||
unsigned m_inductive_lvl;
|
||||
unsigned m_expanded_lvl;
|
||||
ptr_buffer<lemma_generalizer> m_lemma_generalizers;
|
||||
stats m_stats;
|
||||
model_converter_ref m_mc;
|
||||
proof_converter_ref m_pc;
|
||||
bool m_use_native_mbp;
|
||||
bool m_ground_cti;
|
||||
bool m_instantiate;
|
||||
bool m_use_qlemmas;
|
||||
bool m_weak_abs;
|
||||
bool m_use_restarts;
|
||||
unsigned m_restart_initial_threshold;
|
||||
|
||||
// Functions used by search.
|
||||
lbool solve_core (unsigned from_lvl = 0);
|
||||
bool check_reachability ();
|
||||
bool propagate(unsigned min_prop_lvl, unsigned max_prop_lvl,
|
||||
unsigned full_prop_lvl);
|
||||
bool is_reachable(pob &n);
|
||||
lbool expand_node(pob& n);
|
||||
reach_fact *mk_reach_fact (pob& n, model_evaluator_util &mev,
|
||||
datalog::rule const& r);
|
||||
bool create_children(pob& n, datalog::rule const& r,
|
||||
model_evaluator_util &model,
|
||||
const vector<bool>& reach_pred_used);
|
||||
expr_ref mk_sat_answer();
|
||||
expr_ref mk_unsat_answer() const;
|
||||
|
||||
// Generate inductive property
|
||||
void get_level_property(unsigned lvl, expr_ref_vector& res,
|
||||
vector<relation_info> & rs) const;
|
||||
|
||||
|
||||
// Initialization
|
||||
void init_lemma_generalizers(datalog::rule_set& rules);
|
||||
|
||||
bool check_invariant(unsigned lvl);
|
||||
bool check_invariant(unsigned lvl, func_decl* fn);
|
||||
|
||||
void checkpoint();
|
||||
|
||||
void init_rules(datalog::rule_set& rules, decl2rel& transformers);
|
||||
|
||||
void simplify_formulas();
|
||||
|
||||
void reset_lemma_generalizers();
|
||||
|
||||
bool validate();
|
||||
|
||||
unsigned get_cex_depth ();
|
||||
|
||||
public:
|
||||
/**
|
||||
Initial values of predicates are stored in corresponding relations in dctx.
|
||||
|
||||
We check whether there is some reachable state of the relation checked_relation.
|
||||
*/
|
||||
context(
|
||||
fixedpoint_params const& params,
|
||||
ast_manager& m);
|
||||
|
||||
~context();
|
||||
|
||||
fixedpoint_params const& get_params() const { return m_params; }
|
||||
bool use_native_mbp () {return m_use_native_mbp;}
|
||||
bool use_ground_cti () {return m_ground_cti;}
|
||||
bool use_instantiate () { return m_instantiate; }
|
||||
bool use_qlemmas () {return m_use_qlemmas; }
|
||||
|
||||
ast_manager& get_ast_manager() const { return m; }
|
||||
manager& get_manager() { return m_pm; }
|
||||
decl2rel const& get_pred_transformers() const { return m_rels; }
|
||||
pred_transformer& get_pred_transformer(func_decl* p) const
|
||||
{ return *m_rels.find(p); }
|
||||
datalog::context& get_datalog_context() const
|
||||
{ SASSERT(m_context); return *m_context; }
|
||||
expr_ref get_answer();
|
||||
/**
|
||||
* get bottom-up (from query) sequence of ground predicate instances
|
||||
* (for e.g. P(0,1,0,0,3)) that together form a ground derivation to query
|
||||
*/
|
||||
expr_ref get_ground_sat_answer ();
|
||||
|
||||
void collect_statistics(statistics& st) const;
|
||||
void reset_statistics();
|
||||
|
||||
std::ostream& display(std::ostream& strm) const;
|
||||
|
||||
void display_certificate(std::ostream& strm) const {}
|
||||
|
||||
lbool solve(unsigned from_lvl = 0);
|
||||
|
||||
lbool solve_from_lvl (unsigned from_lvl);
|
||||
|
||||
void reset();
|
||||
|
||||
void set_query(func_decl* q) { m_query_pred = q; }
|
||||
|
||||
void set_unsat() { m_last_result = l_false; }
|
||||
|
||||
void set_model_converter(model_converter_ref& mc) { m_mc = mc; }
|
||||
|
||||
void get_rules_along_trace (datalog::rule_ref_vector& rules);
|
||||
|
||||
model_converter_ref get_model_converter() { return m_mc; }
|
||||
|
||||
void set_proof_converter(proof_converter_ref& pc) { m_pc = pc; }
|
||||
|
||||
void update_rules(datalog::rule_set& rules);
|
||||
|
||||
void set_axioms(expr* axioms) { m_pm.set_background(axioms); }
|
||||
|
||||
unsigned get_num_levels(func_decl* p);
|
||||
|
||||
expr_ref get_cover_delta(int level, func_decl* p_orig, func_decl* p);
|
||||
|
||||
void add_cover(int level, func_decl* pred, expr* property);
|
||||
|
||||
expr_ref get_reachable (func_decl* p);
|
||||
|
||||
void add_invariant (func_decl *pred, expr* property);
|
||||
|
||||
model_ref get_model();
|
||||
|
||||
proof_ref get_proof() const;
|
||||
|
||||
pob& get_root() const { return m_pob_queue.get_root(); }
|
||||
|
||||
expr_ref get_constraints (unsigned lvl);
|
||||
void add_constraints (unsigned lvl, expr_ref c);
|
||||
};
|
||||
|
||||
inline bool pred_transformer::use_native_mbp () {return ctx.use_native_mbp ();}
|
||||
}
|
||||
|
||||
#endif
|
354
src/muz/spacer/spacer_dl_interface.cpp
Normal file
354
src/muz/spacer/spacer_dl_interface.cpp
Normal file
|
@ -0,0 +1,354 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Microsoft Corporation and Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_dl.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
SMT2 interface for the datalog SPACER
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include "muz/base/dl_context.h"
|
||||
#include "muz/transforms/dl_mk_coi_filter.h"
|
||||
#include "muz/transforms/dl_mk_interp_tail_simplifier.h"
|
||||
#include "muz/transforms/dl_mk_subsumption_checker.h"
|
||||
#include "muz/transforms/dl_mk_rule_inliner.h"
|
||||
#include "muz/base/dl_rule.h"
|
||||
#include "muz/base/dl_rule_transformer.h"
|
||||
#include "parsers/smt2/smt2parser.h"
|
||||
#include "muz/spacer/spacer_context.h"
|
||||
#include "muz/spacer/spacer_dl_interface.h"
|
||||
#include "muz/base/dl_rule_set.h"
|
||||
#include "muz/transforms/dl_mk_slice.h"
|
||||
#include "muz/transforms/dl_mk_unfold.h"
|
||||
#include "muz/transforms/dl_mk_coalesce.h"
|
||||
#include "model/model_smt2_pp.h"
|
||||
#include "ast/scoped_proof.h"
|
||||
#include "muz/transforms/dl_transforms.h"
|
||||
|
||||
using namespace spacer;
|
||||
|
||||
dl_interface::dl_interface(datalog::context& ctx) :
|
||||
engine_base(ctx.get_manager(), "spacer"),
|
||||
m_ctx(ctx),
|
||||
m_spacer_rules(ctx),
|
||||
m_old_rules(ctx),
|
||||
m_context(0),
|
||||
m_refs(ctx.get_manager())
|
||||
{
|
||||
m_context = alloc(spacer::context, ctx.get_params(), ctx.get_manager());
|
||||
}
|
||||
|
||||
|
||||
dl_interface::~dl_interface()
|
||||
{
|
||||
dealloc(m_context);
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
// Check if the new rules are weaker so that we can
|
||||
// re-use existing context.
|
||||
//
|
||||
void dl_interface::check_reset()
|
||||
{
|
||||
datalog::rule_set const& new_rules = m_ctx.get_rules();
|
||||
datalog::rule_ref_vector const& old_rules = m_old_rules.get_rules();
|
||||
bool is_subsumed = !old_rules.empty();
|
||||
for (unsigned i = 0; is_subsumed && i < new_rules.get_num_rules(); ++i) {
|
||||
is_subsumed = false;
|
||||
for (unsigned j = 0; !is_subsumed && j < old_rules.size(); ++j) {
|
||||
if (m_ctx.check_subsumes(*old_rules[j], *new_rules.get_rule(i))) {
|
||||
is_subsumed = true;
|
||||
}
|
||||
}
|
||||
if (!is_subsumed) {
|
||||
TRACE("spacer", new_rules.get_rule(i)->display(m_ctx, tout << "Fresh rule "););
|
||||
m_context->reset();
|
||||
}
|
||||
}
|
||||
m_old_rules.replace_rules(new_rules);
|
||||
}
|
||||
|
||||
|
||||
lbool dl_interface::query(expr * query)
|
||||
{
|
||||
//we restore the initial state in the datalog context
|
||||
m_ctx.ensure_opened();
|
||||
m_refs.reset();
|
||||
m_pred2slice.reset();
|
||||
ast_manager& m = m_ctx.get_manager();
|
||||
datalog::rule_manager& rm = m_ctx.get_rule_manager();
|
||||
datalog::rule_set& rules0 = m_ctx.get_rules();
|
||||
datalog::rule_set old_rules(rules0);
|
||||
func_decl_ref query_pred(m);
|
||||
rm.mk_query(query, m_ctx.get_rules());
|
||||
expr_ref bg_assertion = m_ctx.get_background_assertion();
|
||||
|
||||
check_reset();
|
||||
|
||||
TRACE("spacer",
|
||||
if (!m.is_true(bg_assertion)) {
|
||||
tout << "axioms:\n";
|
||||
tout << mk_pp(bg_assertion, m) << "\n";
|
||||
}
|
||||
tout << "query: " << mk_pp(query, m) << "\n";
|
||||
tout << "rules:\n";
|
||||
m_ctx.display_rules(tout);
|
||||
);
|
||||
|
||||
|
||||
apply_default_transformation(m_ctx);
|
||||
|
||||
if (m_ctx.get_params().xform_slice()) {
|
||||
datalog::rule_transformer transformer(m_ctx);
|
||||
datalog::mk_slice* slice = alloc(datalog::mk_slice, m_ctx);
|
||||
transformer.register_plugin(slice);
|
||||
m_ctx.transform_rules(transformer);
|
||||
|
||||
// track sliced predicates.
|
||||
obj_map<func_decl, func_decl*> const& preds = slice->get_predicates();
|
||||
obj_map<func_decl, func_decl*>::iterator it = preds.begin();
|
||||
obj_map<func_decl, func_decl*>::iterator end = preds.end();
|
||||
for (; it != end; ++it) {
|
||||
m_pred2slice.insert(it->m_key, it->m_value);
|
||||
m_refs.push_back(it->m_key);
|
||||
m_refs.push_back(it->m_value);
|
||||
}
|
||||
}
|
||||
|
||||
if (m_ctx.get_params().xform_unfold_rules() > 0) {
|
||||
unsigned num_unfolds = m_ctx.get_params().xform_unfold_rules();
|
||||
datalog::rule_transformer transf1(m_ctx), transf2(m_ctx);
|
||||
transf1.register_plugin(alloc(datalog::mk_coalesce, m_ctx));
|
||||
transf2.register_plugin(alloc(datalog::mk_unfold, m_ctx));
|
||||
if (m_ctx.get_params().xform_coalesce_rules()) {
|
||||
m_ctx.transform_rules(transf1);
|
||||
}
|
||||
while (num_unfolds > 0) {
|
||||
m_ctx.transform_rules(transf2);
|
||||
--num_unfolds;
|
||||
}
|
||||
}
|
||||
|
||||
const datalog::rule_set& rules = m_ctx.get_rules();
|
||||
if (rules.get_output_predicates().empty()) {
|
||||
m_context->set_unsat();
|
||||
return l_false;
|
||||
}
|
||||
|
||||
query_pred = rules.get_output_predicate();
|
||||
|
||||
IF_VERBOSE(2, m_ctx.display_rules(verbose_stream()););
|
||||
m_spacer_rules.replace_rules(rules);
|
||||
m_spacer_rules.close();
|
||||
m_ctx.record_transformed_rules();
|
||||
m_ctx.reopen();
|
||||
m_ctx.replace_rules(old_rules);
|
||||
|
||||
scoped_restore_proof _sc(m); // update_rules may overwrite the proof mode.
|
||||
|
||||
m_context->set_proof_converter(m_ctx.get_proof_converter());
|
||||
m_context->set_model_converter(m_ctx.get_model_converter());
|
||||
m_context->set_query(query_pred);
|
||||
m_context->set_axioms(bg_assertion);
|
||||
m_context->update_rules(m_spacer_rules);
|
||||
|
||||
if (m_spacer_rules.get_rules().empty()) {
|
||||
m_context->set_unsat();
|
||||
IF_VERBOSE(2, model_smt2_pp(verbose_stream(), m, *m_context->get_model(), 0););
|
||||
return l_false;
|
||||
}
|
||||
|
||||
return m_context->solve();
|
||||
|
||||
}
|
||||
|
||||
lbool dl_interface::query_from_lvl(expr * query, unsigned lvl)
|
||||
{
|
||||
//we restore the initial state in the datalog context
|
||||
m_ctx.ensure_opened();
|
||||
m_refs.reset();
|
||||
m_pred2slice.reset();
|
||||
ast_manager& m = m_ctx.get_manager();
|
||||
datalog::rule_manager& rm = m_ctx.get_rule_manager();
|
||||
datalog::rule_set& rules0 = m_ctx.get_rules();
|
||||
datalog::rule_set old_rules(rules0);
|
||||
func_decl_ref query_pred(m);
|
||||
rm.mk_query(query, m_ctx.get_rules());
|
||||
expr_ref bg_assertion = m_ctx.get_background_assertion();
|
||||
|
||||
check_reset();
|
||||
|
||||
TRACE("spacer",
|
||||
if (!m.is_true(bg_assertion)) {
|
||||
tout << "axioms:\n";
|
||||
tout << mk_pp(bg_assertion, m) << "\n";
|
||||
}
|
||||
tout << "query: " << mk_pp(query, m) << "\n";
|
||||
tout << "rules:\n";
|
||||
m_ctx.display_rules(tout);
|
||||
);
|
||||
|
||||
|
||||
apply_default_transformation(m_ctx);
|
||||
|
||||
if (m_ctx.get_params().xform_slice()) {
|
||||
datalog::rule_transformer transformer(m_ctx);
|
||||
datalog::mk_slice* slice = alloc(datalog::mk_slice, m_ctx);
|
||||
transformer.register_plugin(slice);
|
||||
m_ctx.transform_rules(transformer);
|
||||
|
||||
// track sliced predicates.
|
||||
obj_map<func_decl, func_decl*> const& preds = slice->get_predicates();
|
||||
obj_map<func_decl, func_decl*>::iterator it = preds.begin();
|
||||
obj_map<func_decl, func_decl*>::iterator end = preds.end();
|
||||
for (; it != end; ++it) {
|
||||
m_pred2slice.insert(it->m_key, it->m_value);
|
||||
m_refs.push_back(it->m_key);
|
||||
m_refs.push_back(it->m_value);
|
||||
}
|
||||
}
|
||||
|
||||
if (m_ctx.get_params().xform_unfold_rules() > 0) {
|
||||
unsigned num_unfolds = m_ctx.get_params().xform_unfold_rules();
|
||||
datalog::rule_transformer transf1(m_ctx), transf2(m_ctx);
|
||||
transf1.register_plugin(alloc(datalog::mk_coalesce, m_ctx));
|
||||
transf2.register_plugin(alloc(datalog::mk_unfold, m_ctx));
|
||||
if (m_ctx.get_params().xform_coalesce_rules()) {
|
||||
m_ctx.transform_rules(transf1);
|
||||
}
|
||||
while (num_unfolds > 0) {
|
||||
m_ctx.transform_rules(transf2);
|
||||
--num_unfolds;
|
||||
}
|
||||
}
|
||||
|
||||
const datalog::rule_set& rules = m_ctx.get_rules();
|
||||
if (rules.get_output_predicates().empty()) {
|
||||
|
||||
m_context->set_unsat();
|
||||
return l_false;
|
||||
}
|
||||
|
||||
query_pred = rules.get_output_predicate();
|
||||
|
||||
IF_VERBOSE(2, m_ctx.display_rules(verbose_stream()););
|
||||
m_spacer_rules.replace_rules(rules);
|
||||
m_spacer_rules.close();
|
||||
m_ctx.record_transformed_rules();
|
||||
m_ctx.reopen();
|
||||
m_ctx.replace_rules(old_rules);
|
||||
|
||||
scoped_restore_proof _sc(m); // update_rules may overwrite the proof mode.
|
||||
|
||||
m_context->set_proof_converter(m_ctx.get_proof_converter());
|
||||
m_context->set_model_converter(m_ctx.get_model_converter());
|
||||
m_context->set_query(query_pred);
|
||||
m_context->set_axioms(bg_assertion);
|
||||
m_context->update_rules(m_spacer_rules);
|
||||
|
||||
if (m_spacer_rules.get_rules().empty()) {
|
||||
m_context->set_unsat();
|
||||
IF_VERBOSE(1, model_smt2_pp(verbose_stream(), m, *m_context->get_model(), 0););
|
||||
return l_false;
|
||||
}
|
||||
|
||||
return m_context->solve(lvl);
|
||||
|
||||
}
|
||||
|
||||
expr_ref dl_interface::get_cover_delta(int level, func_decl* pred_orig)
|
||||
{
|
||||
func_decl* pred = pred_orig;
|
||||
m_pred2slice.find(pred_orig, pred);
|
||||
SASSERT(pred);
|
||||
return m_context->get_cover_delta(level, pred_orig, pred);
|
||||
}
|
||||
|
||||
void dl_interface::add_cover(int level, func_decl* pred, expr* property)
|
||||
{
|
||||
if (m_ctx.get_params().xform_slice()) {
|
||||
throw default_exception("Covers are incompatible with slicing. Disable slicing before using covers");
|
||||
}
|
||||
m_context->add_cover(level, pred, property);
|
||||
}
|
||||
|
||||
void dl_interface::add_invariant(func_decl* pred, expr* property)
|
||||
{
|
||||
if (m_ctx.get_params().xform_slice()) {
|
||||
throw default_exception("Invariants are incompatible with slicing. Disable slicing before using invariants");
|
||||
}
|
||||
m_context->add_invariant(pred, property);
|
||||
}
|
||||
|
||||
expr_ref dl_interface::get_reachable(func_decl* pred)
|
||||
{
|
||||
if (m_ctx.get_params().xform_slice()) {
|
||||
throw default_exception("Invariants are incompatible with slicing. "
|
||||
"Disable slicing before using invariants");
|
||||
}
|
||||
return m_context->get_reachable(pred);
|
||||
}
|
||||
|
||||
unsigned dl_interface::get_num_levels(func_decl* pred)
|
||||
{
|
||||
m_pred2slice.find(pred, pred);
|
||||
SASSERT(pred);
|
||||
return m_context->get_num_levels(pred);
|
||||
}
|
||||
|
||||
void dl_interface::collect_statistics(statistics& st) const
|
||||
{
|
||||
m_context->collect_statistics(st);
|
||||
}
|
||||
|
||||
void dl_interface::reset_statistics()
|
||||
{
|
||||
m_context->reset_statistics();
|
||||
}
|
||||
|
||||
void dl_interface::display_certificate(std::ostream& out) const
|
||||
{
|
||||
m_context->display_certificate(out);
|
||||
}
|
||||
|
||||
expr_ref dl_interface::get_answer()
|
||||
{
|
||||
return m_context->get_answer();
|
||||
}
|
||||
|
||||
expr_ref dl_interface::get_ground_sat_answer()
|
||||
{
|
||||
return m_context->get_ground_sat_answer();
|
||||
}
|
||||
|
||||
void dl_interface::get_rules_along_trace(datalog::rule_ref_vector& rules)
|
||||
{
|
||||
m_context->get_rules_along_trace(rules);
|
||||
}
|
||||
|
||||
void dl_interface::updt_params()
|
||||
{
|
||||
dealloc(m_context);
|
||||
m_context = alloc(spacer::context, m_ctx.get_params(), m_ctx.get_manager());
|
||||
}
|
||||
|
||||
model_ref dl_interface::get_model()
|
||||
{
|
||||
return m_context->get_model();
|
||||
}
|
||||
|
||||
proof_ref dl_interface::get_proof()
|
||||
{
|
||||
return m_context->get_proof();
|
||||
}
|
86
src/muz/spacer/spacer_dl_interface.h
Normal file
86
src/muz/spacer/spacer_dl_interface.h
Normal file
|
@ -0,0 +1,86 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Microsoft Corporation and Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_dl_interface.h
|
||||
|
||||
Abstract:
|
||||
|
||||
SMT2 interface for the datalog SPACER
|
||||
|
||||
Author:
|
||||
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_DL_INTERFACE_H_
|
||||
#define _SPACER_DL_INTERFACE_H_
|
||||
|
||||
#include "util/lbool.h"
|
||||
#include "muz/base/dl_rule.h"
|
||||
#include "muz/base/dl_rule_set.h"
|
||||
#include "muz/base/dl_engine_base.h"
|
||||
#include "util/statistics.h"
|
||||
|
||||
namespace datalog {
|
||||
class context;
|
||||
}
|
||||
|
||||
namespace spacer {
|
||||
|
||||
class context;
|
||||
|
||||
class dl_interface : public datalog::engine_base {
|
||||
datalog::context& m_ctx;
|
||||
datalog::rule_set m_spacer_rules;
|
||||
datalog::rule_set m_old_rules;
|
||||
context* m_context;
|
||||
obj_map<func_decl, func_decl*> m_pred2slice;
|
||||
ast_ref_vector m_refs;
|
||||
|
||||
void check_reset();
|
||||
|
||||
public:
|
||||
dl_interface(datalog::context& ctx);
|
||||
~dl_interface();
|
||||
|
||||
lbool query(expr* query);
|
||||
|
||||
lbool query_from_lvl(expr* query, unsigned lvl);
|
||||
|
||||
void display_certificate(std::ostream& out) const;
|
||||
|
||||
void collect_statistics(statistics& st) const;
|
||||
|
||||
void reset_statistics();
|
||||
|
||||
expr_ref get_answer();
|
||||
|
||||
expr_ref get_ground_sat_answer();
|
||||
|
||||
void get_rules_along_trace(datalog::rule_ref_vector& rules);
|
||||
|
||||
unsigned get_num_levels(func_decl* pred);
|
||||
|
||||
expr_ref get_cover_delta(int level, func_decl* pred);
|
||||
|
||||
void add_cover(int level, func_decl* pred, expr* property);
|
||||
|
||||
void add_invariant(func_decl* pred, expr* property);
|
||||
|
||||
expr_ref get_reachable(func_decl *pred);
|
||||
|
||||
void updt_params();
|
||||
|
||||
model_ref get_model();
|
||||
|
||||
proof_ref get_proof();
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
#endif
|
439
src/muz/spacer/spacer_farkas_learner.cpp
Normal file
439
src/muz/spacer/spacer_farkas_learner.cpp
Normal file
|
@ -0,0 +1,439 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_farkas_learner.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Proviced abstract interface and some inplementations of algorithms
|
||||
for strenghtning lemmas
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-11-1.
|
||||
|
||||
Revision History:
|
||||
// TODO: what to write here
|
||||
--*/
|
||||
|
||||
//TODO: reorder, delete unnecessary includes
|
||||
#include "ast/ast_smt2_pp.h"
|
||||
#include "ast/array_decl_plugin.h"
|
||||
#include "ast/rewriter/bool_rewriter.h"
|
||||
#include "ast/dl_decl_plugin.h"
|
||||
#include "ast/for_each_expr.h"
|
||||
#include "muz/base/dl_util.h"
|
||||
#include "ast/rewriter/rewriter.h"
|
||||
#include "ast/rewriter/rewriter_def.h"
|
||||
#include "muz/spacer/spacer_util.h"
|
||||
#include "muz/spacer/spacer_farkas_learner.h"
|
||||
#include "ast/rewriter/th_rewriter.h"
|
||||
#include "ast/ast_ll_pp.h"
|
||||
#include "muz/base/proof_utils.h"
|
||||
#include "ast/reg_decl_plugins.h"
|
||||
#include "smt/smt_farkas_util.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
class collect_pure_proc {
|
||||
func_decl_set& m_symbs;
|
||||
public:
|
||||
collect_pure_proc(func_decl_set& s): m_symbs(s) {}
|
||||
|
||||
void operator()(app* a)
|
||||
{
|
||||
if (a->get_family_id() == null_family_id) {
|
||||
m_symbs.insert(a->get_decl());
|
||||
}
|
||||
}
|
||||
void operator()(var*) {}
|
||||
void operator()(quantifier*) {}
|
||||
};
|
||||
|
||||
void farkas_learner::combine_constraints(unsigned n, app * const * lits, rational const * coeffs, expr_ref& res)
|
||||
{
|
||||
ast_manager& m = res.get_manager();
|
||||
smt::farkas_util res_c(m);
|
||||
res_c.set_split_literals(m_split_literals);
|
||||
for (unsigned i = 0; i < n; ++i) {
|
||||
res_c.add(coeffs[i], lits[i]);
|
||||
}
|
||||
res = res_c.get();
|
||||
}
|
||||
|
||||
// every uninterpreted symbol is in symbs
|
||||
class is_pure_expr_proc {
|
||||
func_decl_set const& m_symbs;
|
||||
array_util m_au;
|
||||
public:
|
||||
struct non_pure {};
|
||||
|
||||
is_pure_expr_proc(func_decl_set const& s, ast_manager& m):
|
||||
m_symbs(s),
|
||||
m_au(m)
|
||||
{}
|
||||
|
||||
void operator()(app* a)
|
||||
{
|
||||
if (a->get_family_id() == null_family_id) {
|
||||
if (!m_symbs.contains(a->get_decl())) {
|
||||
throw non_pure();
|
||||
}
|
||||
} else if (a->get_family_id() == m_au.get_family_id() &&
|
||||
a->is_app_of(a->get_family_id(), OP_ARRAY_EXT)) {
|
||||
throw non_pure();
|
||||
}
|
||||
}
|
||||
void operator()(var*) {}
|
||||
void operator()(quantifier*) {}
|
||||
};
|
||||
|
||||
bool farkas_learner::is_pure_expr(func_decl_set const& symbs, expr* e, ast_manager& m) const
|
||||
{
|
||||
is_pure_expr_proc proc(symbs, m);
|
||||
try {
|
||||
for_each_expr(proc, e);
|
||||
} catch (is_pure_expr_proc::non_pure) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
Revised version of Farkas strengthener.
|
||||
1. Mark B-pure nodes as derivations that depend only on B.
|
||||
2. Collect B-influenced nodes
|
||||
3. (optional) Permute B-pure units over resolution steps to narrow dependencies on B.
|
||||
4. Weaken B-pure units for resolution with Farkas Clauses.
|
||||
5. Add B-pure units elsewhere.
|
||||
|
||||
Rules:
|
||||
- hypothesis h |- h
|
||||
|
||||
H |- false
|
||||
- lemma ----------
|
||||
|- not H
|
||||
|
||||
Th |- L \/ C H |- not L
|
||||
- th-lemma -------------------------
|
||||
H |- C
|
||||
|
||||
Note: C is false for theory axioms, C is unit literal for propagation.
|
||||
|
||||
- rewrite |- t = s
|
||||
|
||||
H |- t = s
|
||||
- monotonicity ----------------
|
||||
H |- f(t) = f(s)
|
||||
|
||||
H |- t = s H' |- s = u
|
||||
- trans ----------------------
|
||||
H, H' |- t = u
|
||||
|
||||
H |- C \/ L H' |- not L
|
||||
- unit_resolve ------------------------
|
||||
H, H' |- C
|
||||
|
||||
H |- a ~ b H' |- a
|
||||
- mp --------------------
|
||||
H, H' |- b
|
||||
|
||||
- def-axiom |- C
|
||||
|
||||
- asserted |- f
|
||||
|
||||
Mark nodes by:
|
||||
- Hypotheses
|
||||
- Dependency on bs
|
||||
- Dependency on A
|
||||
|
||||
A node is unit derivable from bs if:
|
||||
- It has no hypotheses.
|
||||
- It depends on bs.
|
||||
- It does not depend on A.
|
||||
|
||||
NB: currently unit derivable is not symmetric: A clause can be
|
||||
unit derivable, but a unit literal with hypotheses is not.
|
||||
This is clearly wrong, because hypotheses are just additional literals
|
||||
in a clausal version.
|
||||
|
||||
NB: the routine is not interpolating, though an interpolating variant would
|
||||
be preferable because then we can also use it for model propagation.
|
||||
|
||||
We collect the unit derivable nodes from bs.
|
||||
These are the weakenings of bs, besides the
|
||||
units under Farkas.
|
||||
|
||||
*/
|
||||
|
||||
#define INSERT(_x_) if (!lemma_set.contains(_x_)) { lemma_set.insert(_x_); lemmas.push_back(_x_); }
|
||||
|
||||
void farkas_learner::get_lemmas(proof* root, expr_set const& bs, expr_ref_vector& lemmas)
|
||||
{
|
||||
ast_manager& m = lemmas.get_manager();
|
||||
bool_rewriter brwr(m);
|
||||
func_decl_set Bsymbs;
|
||||
collect_pure_proc collect_proc(Bsymbs);
|
||||
expr_set::iterator it = bs.begin(), end = bs.end();
|
||||
for (; it != end; ++it) {
|
||||
for_each_expr(collect_proc, *it);
|
||||
}
|
||||
|
||||
proof_ref pr(root, m);
|
||||
proof_utils::reduce_hypotheses(pr);
|
||||
proof_utils::permute_unit_resolution(pr);
|
||||
IF_VERBOSE(3, verbose_stream() << "Reduced proof:\n" << mk_ismt2_pp(pr, m) << "\n";);
|
||||
|
||||
ptr_vector<expr_set> hyprefs;
|
||||
obj_map<expr, expr_set*> hypmap;
|
||||
obj_hashtable<expr> lemma_set;
|
||||
ast_mark b_depend, a_depend, visited, b_closed;
|
||||
expr_set* empty_set = alloc(expr_set);
|
||||
hyprefs.push_back(empty_set);
|
||||
ptr_vector<proof> todo;
|
||||
TRACE("spacer_verbose", tout << mk_pp(pr, m) << "\n";);
|
||||
todo.push_back(pr);
|
||||
while (!todo.empty()) {
|
||||
proof* p = todo.back();
|
||||
SASSERT(m.is_proof(p));
|
||||
if (visited.is_marked(p)) {
|
||||
todo.pop_back();
|
||||
continue;
|
||||
}
|
||||
bool all_visit = true;
|
||||
for (unsigned i = 0; i < m.get_num_parents(p); ++i) {
|
||||
expr* arg = p->get_arg(i);
|
||||
SASSERT(m.is_proof(arg));
|
||||
if (!visited.is_marked(arg)) {
|
||||
all_visit = false;
|
||||
todo.push_back(to_app(arg));
|
||||
}
|
||||
}
|
||||
if (!all_visit) {
|
||||
continue;
|
||||
}
|
||||
visited.mark(p, true);
|
||||
todo.pop_back();
|
||||
|
||||
// retrieve hypotheses and dependencies on A, bs.
|
||||
bool b_dep = false, a_dep = false;
|
||||
expr_set* hyps = empty_set;
|
||||
for (unsigned i = 0; i < m.get_num_parents(p); ++i) {
|
||||
expr* arg = p->get_arg(i);
|
||||
a_dep = a_dep || a_depend.is_marked(arg);
|
||||
b_dep = b_dep || b_depend.is_marked(arg);
|
||||
expr_set* hyps2 = hypmap.find(arg);
|
||||
if (hyps != hyps2 && !hyps2->empty()) {
|
||||
if (hyps->empty()) {
|
||||
hyps = hyps2;
|
||||
} else {
|
||||
expr_set* hyps3 = alloc(expr_set);
|
||||
datalog::set_union(*hyps3, *hyps);
|
||||
datalog::set_union(*hyps3, *hyps2);
|
||||
hyps = hyps3;
|
||||
hyprefs.push_back(hyps);
|
||||
}
|
||||
}
|
||||
}
|
||||
hypmap.insert(p, hyps);
|
||||
a_depend.mark(p, a_dep);
|
||||
b_depend.mark(p, b_dep);
|
||||
|
||||
#define IS_B_PURE(_p) (b_depend.is_marked(_p) && !a_depend.is_marked(_p) && hypmap.find(_p)->empty())
|
||||
|
||||
|
||||
// Add lemmas that depend on bs, have no hypotheses, don't depend on A.
|
||||
if ((!hyps->empty() || a_depend.is_marked(p)) &&
|
||||
b_depend.is_marked(p) && !is_farkas_lemma(m, p)) {
|
||||
for (unsigned i = 0; i < m.get_num_parents(p); ++i) {
|
||||
app* arg = to_app(p->get_arg(i));
|
||||
if (IS_B_PURE(arg)) {
|
||||
expr* fact = m.get_fact(arg);
|
||||
if (is_pure_expr(Bsymbs, fact, m)) {
|
||||
TRACE("farkas_learner2",
|
||||
tout << "Add: " << mk_pp(m.get_fact(arg), m) << "\n";
|
||||
tout << mk_pp(arg, m) << "\n";
|
||||
);
|
||||
INSERT(fact);
|
||||
} else {
|
||||
get_asserted(p, bs, b_closed, lemma_set, lemmas);
|
||||
b_closed.mark(p, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch (p->get_decl_kind()) {
|
||||
case PR_ASSERTED:
|
||||
if (bs.contains(m.get_fact(p))) {
|
||||
b_depend.mark(p, true);
|
||||
} else {
|
||||
a_depend.mark(p, true);
|
||||
}
|
||||
break;
|
||||
case PR_HYPOTHESIS: {
|
||||
SASSERT(hyps == empty_set);
|
||||
hyps = alloc(expr_set);
|
||||
hyps->insert(m.get_fact(p));
|
||||
hyprefs.push_back(hyps);
|
||||
hypmap.insert(p, hyps);
|
||||
break;
|
||||
}
|
||||
case PR_DEF_AXIOM: {
|
||||
if (!is_pure_expr(Bsymbs, m.get_fact(p), m)) {
|
||||
a_depend.mark(p, true);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case PR_LEMMA: {
|
||||
expr_set* hyps2 = alloc(expr_set);
|
||||
hyprefs.push_back(hyps2);
|
||||
datalog::set_union(*hyps2, *hyps);
|
||||
hyps = hyps2;
|
||||
expr* fml = m.get_fact(p);
|
||||
hyps->remove(fml);
|
||||
if (m.is_or(fml)) {
|
||||
for (unsigned i = 0; i < to_app(fml)->get_num_args(); ++i) {
|
||||
expr* f = to_app(fml)->get_arg(i);
|
||||
expr_ref hyp(m);
|
||||
brwr.mk_not(f, hyp);
|
||||
hyps->remove(hyp);
|
||||
}
|
||||
}
|
||||
hypmap.insert(p, hyps);
|
||||
break;
|
||||
}
|
||||
case PR_TH_LEMMA: {
|
||||
if (!is_farkas_lemma(m, p)) { break; }
|
||||
|
||||
SASSERT(m.has_fact(p));
|
||||
unsigned prem_cnt = m.get_num_parents(p);
|
||||
func_decl * d = p->get_decl();
|
||||
SASSERT(d->get_num_parameters() >= prem_cnt + 2);
|
||||
SASSERT(d->get_parameter(0).get_symbol() == "arith");
|
||||
SASSERT(d->get_parameter(1).get_symbol() == "farkas");
|
||||
parameter const* params = d->get_parameters() + 2;
|
||||
|
||||
app_ref_vector lits(m);
|
||||
expr_ref tmp(m);
|
||||
unsigned num_b_pures = 0;
|
||||
rational coef;
|
||||
vector<rational> coeffs;
|
||||
|
||||
TRACE("farkas_learner2",
|
||||
for (unsigned i = 0; i < prem_cnt; ++i) {
|
||||
VERIFY(params[i].is_rational(coef));
|
||||
proof* prem = to_app(p->get_arg(i));
|
||||
bool b_pure = IS_B_PURE(prem);
|
||||
tout << (b_pure ? "B" : "A") << " " << coef << " " << mk_pp(m.get_fact(prem), m) << "\n";
|
||||
}
|
||||
tout << mk_pp(m.get_fact(p), m) << "\n";
|
||||
);
|
||||
|
||||
// NB. Taking 'abs' of coefficients is a workaround.
|
||||
// The Farkas coefficient extraction in arith_core must be wrong.
|
||||
// The coefficients would be always positive relative to the theory lemma.
|
||||
|
||||
for (unsigned i = 0; i < prem_cnt; ++i) {
|
||||
expr * prem_e = p->get_arg(i);
|
||||
SASSERT(is_app(prem_e));
|
||||
proof * prem = to_app(prem_e);
|
||||
|
||||
if (IS_B_PURE(prem)) {
|
||||
++num_b_pures;
|
||||
} else {
|
||||
VERIFY(params[i].is_rational(coef));
|
||||
lits.push_back(to_app(m.get_fact(prem)));
|
||||
coeffs.push_back(abs(coef));
|
||||
}
|
||||
}
|
||||
params += prem_cnt;
|
||||
if (prem_cnt + 2 < d->get_num_parameters()) {
|
||||
unsigned num_args = 1;
|
||||
expr* fact = m.get_fact(p);
|
||||
expr* const* args = &fact;
|
||||
if (m.is_or(fact)) {
|
||||
app* _or = to_app(fact);
|
||||
num_args = _or->get_num_args();
|
||||
args = _or->get_args();
|
||||
}
|
||||
SASSERT(prem_cnt + 2 + num_args == d->get_num_parameters());
|
||||
for (unsigned i = 0; i < num_args; ++i) {
|
||||
expr* prem_e = args[i];
|
||||
brwr.mk_not(prem_e, tmp);
|
||||
VERIFY(params[i].is_rational(coef));
|
||||
SASSERT(is_app(tmp));
|
||||
lits.push_back(to_app(tmp));
|
||||
coeffs.push_back(abs(coef));
|
||||
}
|
||||
|
||||
}
|
||||
SASSERT(coeffs.size() == lits.size());
|
||||
if (num_b_pures > 0) {
|
||||
expr_ref res(m);
|
||||
combine_constraints(coeffs.size(), lits.c_ptr(), coeffs.c_ptr(), res);
|
||||
TRACE("farkas_learner2", tout << "Add: " << mk_pp(res, m) << "\n";);
|
||||
INSERT(res);
|
||||
b_closed.mark(p, true);
|
||||
}
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
std::for_each(hyprefs.begin(), hyprefs.end(), delete_proc<expr_set>());
|
||||
simplify_bounds(lemmas);
|
||||
}
|
||||
|
||||
void farkas_learner::get_asserted(proof* p, expr_set const& bs, ast_mark& b_closed, obj_hashtable<expr>& lemma_set, expr_ref_vector& lemmas)
|
||||
{
|
||||
ast_manager& m = lemmas.get_manager();
|
||||
ast_mark visited;
|
||||
proof* p0 = p;
|
||||
ptr_vector<proof> todo;
|
||||
todo.push_back(p);
|
||||
|
||||
while (!todo.empty()) {
|
||||
p = todo.back();
|
||||
todo.pop_back();
|
||||
if (visited.is_marked(p) || b_closed.is_marked(p)) {
|
||||
continue;
|
||||
}
|
||||
visited.mark(p, true);
|
||||
for (unsigned i = 0; i < m.get_num_parents(p); ++i) {
|
||||
expr* arg = p->get_arg(i);
|
||||
SASSERT(m.is_proof(arg));
|
||||
todo.push_back(to_app(arg));
|
||||
}
|
||||
if (p->get_decl_kind() == PR_ASSERTED &&
|
||||
bs.contains(m.get_fact(p))) {
|
||||
expr* fact = m.get_fact(p);
|
||||
TRACE("farkas_learner2",
|
||||
tout << mk_ll_pp(p0, m) << "\n";
|
||||
tout << "Add: " << mk_pp(p, m) << "\n";);
|
||||
INSERT(fact);
|
||||
b_closed.mark(p, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool farkas_learner::is_farkas_lemma(ast_manager& m, expr* e)
|
||||
{
|
||||
app * a;
|
||||
func_decl* d;
|
||||
symbol sym;
|
||||
return
|
||||
is_app(e) &&
|
||||
(a = to_app(e), d = a->get_decl(), true) &&
|
||||
PR_TH_LEMMA == a->get_decl_kind() &&
|
||||
d->get_num_parameters() >= 2 &&
|
||||
d->get_parameter(0).is_symbol(sym) && sym == "arith" &&
|
||||
d->get_parameter(1).is_symbol(sym) && sym == "farkas" &&
|
||||
d->get_num_parameters() >= m.get_num_parents(to_app(e)) + 2;
|
||||
}
|
||||
}
|
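Aside (not part of the commit): the Farkas combination computed by combine_constraints above is just a non-negative linear combination of linear inequalities. The following standalone C++ sketch, with an illustrative LinIneq type that is not a Z3 API, shows the arithmetic on two toy constraints.

// Illustrative only: a non-negative linear combination of inequalities
//   sum_i c_i * (a_i . x <= b_i)   implies   (sum_i c_i * a_i) . x <= sum_i c_i * b_i
#include <iostream>
#include <vector>

struct LinIneq {                     // represents  coeffs . x <= bound
    std::vector<double> coeffs;
    double bound;
};

// Combine inequalities with non-negative multipliers (Farkas coefficients).
// Assumes a non-empty input and equal-length coefficient vectors.
LinIneq combine(const std::vector<LinIneq>& ineqs, const std::vector<double>& mult) {
    LinIneq res{std::vector<double>(ineqs[0].coeffs.size(), 0.0), 0.0};
    for (size_t i = 0; i < ineqs.size(); ++i) {
        for (size_t j = 0; j < res.coeffs.size(); ++j)
            res.coeffs[j] += mult[i] * ineqs[i].coeffs[j];
        res.bound += mult[i] * ineqs[i].bound;
    }
    return res;
}

int main() {
    // 2*(x <= 1) + 1*(x + y <= 2)  yields  3x + 1y <= 4
    std::vector<LinIneq> ineqs = {{{1.0, 0.0}, 1.0}, {{1.0, 1.0}, 2.0}};
    LinIneq r = combine(ineqs, {2.0, 1.0});
    std::cout << r.coeffs[0] << "x + " << r.coeffs[1] << "y <= " << r.bound << "\n";
}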
66
src/muz/spacer/spacer_farkas_learner.h
Normal file
@ -0,0 +1,66 @@
/*++
Copyright (c) 2011 Microsoft Corporation

Module Name:

    spacer_farkas_learner.h

Abstract:

    SMT2 interface for the datalog SPACER

Author:

    Krystof Hoder (t-khoder) 2011-11-1.

Revision History:

--*/

#ifndef _SPACER_FARKAS_LEARNER_H_
#define _SPACER_FARKAS_LEARNER_H_

#include "ast/ast.h"

namespace spacer {

class farkas_learner {
    typedef obj_hashtable<expr> expr_set;

    bool m_split_literals;

    void combine_constraints(unsigned cnt, app * const * constrs, rational const * coeffs, expr_ref& res);

    bool is_farkas_lemma(ast_manager& m, expr* e);

    void get_asserted(proof* p, expr_set const& bs, ast_mark& b_closed, obj_hashtable<expr>& lemma_set, expr_ref_vector& lemmas);

    bool is_pure_expr(func_decl_set const& symbs, expr* e, ast_manager& m) const;

public:
    farkas_learner(): m_split_literals(false) {}

    /**
       Traverse a proof and retrieve lemmas using the vocabulary from bs.
    */
    void get_lemmas(proof* root, expr_set const& bs, expr_ref_vector& lemmas);

    void collect_statistics(statistics& st) const {}
    void reset_statistics() {}

    /** \brief see smt::farkas_util::set_split_literals */
    void set_split_literals(bool v) {m_split_literals = v;}
};

}

#endif
294
src/muz/spacer/spacer_generalizers.cpp
Normal file
@ -0,0 +1,294 @@
|||
/*++
|
||||
Copyright (c) 2017 Microsoft Corporation and Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_generalizers.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Lemma generalizers.
|
||||
|
||||
Author:
|
||||
|
||||
Nikolaj Bjorner (nbjorner) 2011-11-20.
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
|
||||
#include "muz/spacer/spacer_context.h"
|
||||
#include "muz/spacer/spacer_generalizers.h"
|
||||
#include "ast/expr_abstract.h"
|
||||
#include "ast/rewriter/var_subst.h"
|
||||
#include "ast/for_each_expr.h"
|
||||
#include "muz/spacer/obj_equiv_class.h"
|
||||
|
||||
|
||||
namespace spacer {
|
||||
void lemma_sanity_checker::operator()(lemma_ref &lemma) {
|
||||
unsigned uses_level;
|
||||
expr_ref_vector cube(lemma->get_ast_manager());
|
||||
cube.append(lemma->get_cube());
|
||||
ENSURE(lemma->get_pob()->pt().check_inductive(lemma->level(),
|
||||
cube, uses_level));
|
||||
}
|
||||
|
||||
|
||||
// ------------------------
|
||||
// lemma_bool_inductive_generalizer
|
||||
/// Inductive generalization by dropping and expanding literals
|
||||
void lemma_bool_inductive_generalizer::operator()(lemma_ref &lemma) {
|
||||
if (lemma->get_cube().empty()) return;
|
||||
|
||||
m_st.count++;
|
||||
scoped_watch _w_(m_st.watch);
|
||||
|
||||
unsigned uses_level;
|
||||
pred_transformer &pt = lemma->get_pob()->pt();
|
||||
ast_manager &m = pt.get_ast_manager();
|
||||
|
||||
expr_ref_vector cube(m);
|
||||
cube.append(lemma->get_cube());
|
||||
|
||||
bool dirty = false;
|
||||
expr_ref true_expr(m.mk_true(), m);
|
||||
ptr_vector<expr> processed;
|
||||
expr_ref_vector extra_lits(m);
|
||||
|
||||
unsigned i = 0, num_failures = 0;
|
||||
while (i < cube.size() &&
|
||||
(!m_failure_limit || num_failures < m_failure_limit)) {
|
||||
expr_ref lit(m);
|
||||
lit = cube.get(i);
|
||||
cube[i] = true_expr;
|
||||
if (cube.size() > 1 &&
|
||||
pt.check_inductive(lemma->level(), cube, uses_level)) {
|
||||
num_failures = 0;
|
||||
dirty = true;
|
||||
for (i = 0; i < cube.size() &&
|
||||
processed.contains(cube.get(i)); ++i);
|
||||
} else {
|
||||
// check if the literal can be expanded and any single
|
||||
// literal in the expansion can replace it
|
||||
extra_lits.reset();
|
||||
extra_lits.push_back(lit);
|
||||
expand_literals(m, extra_lits);
|
||||
SASSERT(extra_lits.size() > 0);
|
||||
bool found = false;
|
||||
if (extra_lits.get(0) != lit) {
|
||||
SASSERT(extra_lits.size() > 1);
|
||||
for (unsigned j = 0, sz = extra_lits.size(); !found && j < sz; ++j) {
|
||||
cube[i] = extra_lits.get(j);
|
||||
if (pt.check_inductive(lemma->level(), cube, uses_level)) {
|
||||
num_failures = 0;
|
||||
dirty = true;
|
||||
found = true;
|
||||
processed.push_back(extra_lits.get(j));
|
||||
for (i = 0; i < cube.size() &&
|
||||
processed.contains(cube.get(i)); ++i);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
cube[i] = lit;
|
||||
processed.push_back(lit);
|
||||
++num_failures;
|
||||
++m_st.num_failures;
|
||||
++i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (dirty) {
|
||||
TRACE("spacer",
|
||||
tout << "Generalized from:\n" << mk_and(lemma->get_cube())
|
||||
<< "\ninto\n" << mk_and(cube) << "\n";);
|
||||
|
||||
lemma->update_cube(lemma->get_pob(), cube);
|
||||
SASSERT(uses_level >= lemma->level());
|
||||
lemma->set_level(uses_level);
|
||||
}
|
||||
}
|
||||
|
||||
void lemma_bool_inductive_generalizer::collect_statistics(statistics &st) const
|
||||
{
|
||||
st.update("time.spacer.solve.reach.gen.bool_ind", m_st.watch.get_seconds());
|
||||
st.update("bool inductive gen", m_st.count);
|
||||
st.update("bool inductive gen failures", m_st.num_failures);
|
||||
}
|
||||
|
||||
void unsat_core_generalizer::operator()(lemma_ref &lemma)
|
||||
{
|
||||
m_st.count++;
|
||||
scoped_watch _w_(m_st.watch);
|
||||
ast_manager &m = lemma->get_ast_manager();
|
||||
|
||||
pred_transformer &pt = lemma->get_pob()->pt();
|
||||
|
||||
unsigned old_sz = lemma->get_cube().size();
|
||||
unsigned old_level = lemma->level();
|
||||
|
||||
unsigned uses_level;
|
||||
expr_ref_vector core(m);
|
||||
bool r;
|
||||
r = pt.is_invariant(lemma->level(), lemma->get_expr(), uses_level, &core);
|
||||
SASSERT(r);
|
||||
|
||||
CTRACE("spacer", old_sz > core.size(),
|
||||
tout << "unsat core reduced lemma from: "
|
||||
<< old_sz << " to " << core.size() << "\n";);
|
||||
CTRACE("spacer", old_level < uses_level,
|
||||
tout << "unsat core moved lemma up from: "
|
||||
<< old_level << " to " << uses_level << "\n";);
|
||||
if (old_sz > core.size()) {
|
||||
lemma->update_cube(lemma->get_pob(), core);
|
||||
lemma->set_level(uses_level);
|
||||
}
|
||||
}
|
||||
|
||||
void unsat_core_generalizer::collect_statistics(statistics &st) const
|
||||
{
|
||||
st.update("time.spacer.solve.reach.gen.unsat_core", m_st.watch.get_seconds());
|
||||
st.update("gen.unsat_core.cnt", m_st.count);
|
||||
st.update("gen.unsat_core.fail", m_st.num_failures);
|
||||
}
|
||||
|
||||
namespace {
|
||||
class collect_array_proc {
|
||||
array_util m_au;
|
||||
func_decl_set &m_symbs;
|
||||
sort *m_sort;
|
||||
public:
|
||||
collect_array_proc(ast_manager &m, func_decl_set& s) :
|
||||
m_au(m), m_symbs(s), m_sort(NULL) {}
|
||||
|
||||
void operator()(app* a)
|
||||
{
|
||||
if (a->get_family_id() == null_family_id && m_au.is_array(a)) {
|
||||
if (m_sort && m_sort != get_sort(a)) { return; }
|
||||
if (!m_sort) { m_sort = get_sort(a); }
|
||||
m_symbs.insert(a->get_decl());
|
||||
}
|
||||
}
|
||||
void operator()(var*) {}
|
||||
void operator()(quantifier*) {}
|
||||
};
|
||||
}
|
||||
|
||||
void lemma_array_eq_generalizer::operator() (lemma_ref &lemma)
|
||||
{
|
||||
TRACE("core_array_eq", tout << "Looking for equalities\n";);
|
||||
|
||||
// -- find array constants
|
||||
ast_manager &m = lemma->get_ast_manager();
|
||||
manager &pm = m_ctx.get_manager();
|
||||
|
||||
expr_ref_vector core(m);
|
||||
expr_ref v(m);
|
||||
func_decl_set symb;
|
||||
collect_array_proc cap(m, symb);
|
||||
|
||||
core.append (lemma->get_cube());
|
||||
v = mk_and(core);
|
||||
for_each_expr(cap, v);
|
||||
|
||||
TRACE("core_array_eq",
|
||||
tout << "found " << symb.size() << " array variables in: \n"
|
||||
<< mk_pp(v, m) << "\n";);
|
||||
|
||||
// too few constants
|
||||
if (symb.size() <= 1) { return; }
|
||||
// too many constants, skip this
|
||||
if (symb.size() >= 8) { return; }
|
||||
|
||||
|
||||
// -- for every pair of variables, try an equality
|
||||
typedef func_decl_set::iterator iterator;
|
||||
ptr_vector<func_decl> vsymbs;
|
||||
for (iterator it = symb.begin(), end = symb.end();
|
||||
it != end; ++it)
|
||||
{ vsymbs.push_back(*it); }
|
||||
|
||||
expr_ref_vector eqs(m);
|
||||
|
||||
for (unsigned i = 0, sz = vsymbs.size(); i < sz; ++i)
|
||||
for (unsigned j = i + 1; j < sz; ++j)
|
||||
{ eqs.push_back(m.mk_eq(m.mk_const(vsymbs.get(i)),
|
||||
m.mk_const(vsymbs.get(j)))); }
|
||||
|
||||
smt::kernel solver(m, m_ctx.get_manager().fparams2());
|
||||
expr_ref_vector lits(m);
|
||||
for (unsigned i = 0, core_sz = core.size(); i < core_sz; ++i) {
|
||||
SASSERT(lits.size() == i);
|
||||
solver.push();
|
||||
solver.assert_expr(core.get(i));
|
||||
for (unsigned j = 0, eqs_sz = eqs.size(); j < eqs_sz; ++j) {
|
||||
solver.push();
|
||||
solver.assert_expr(eqs.get(j));
|
||||
lbool res = solver.check();
|
||||
solver.pop(1);
|
||||
|
||||
if (res == l_false) {
|
||||
TRACE("core_array_eq",
|
||||
tout << "strengthened " << mk_pp(core.get(i), m)
|
||||
<< " with " << mk_pp(m.mk_not(eqs.get(j)), m) << "\n";);
|
||||
lits.push_back(m.mk_not(eqs.get(j)));
|
||||
break;
|
||||
}
|
||||
}
|
||||
solver.pop(1);
|
||||
if (lits.size() == i) { lits.push_back(core.get(i)); }
|
||||
}
|
||||
|
||||
/**
|
||||
HACK: if the first 3 arguments of pt are boolean, assume
|
||||
they correspond to SeaHorn encoding and condition the equality on them.
|
||||
*/
|
||||
// pred_transformer &pt = n.pt ();
|
||||
// if (pt.sig_size () >= 3 &&
|
||||
// m.is_bool (pt.sig (0)->get_range ()) &&
|
||||
// m.is_bool (pt.sig (1)->get_range ()) &&
|
||||
// m.is_bool (pt.sig (2)->get_range ()))
|
||||
// {
|
||||
// lits.push_back (m.mk_const (pm.o2n(pt.sig (0), 0)));
|
||||
// lits.push_back (m.mk_not (m.mk_const (pm.o2n(pt.sig (1), 0))));
|
||||
// lits.push_back (m.mk_not (m.mk_const (pm.o2n(pt.sig (2), 0))));
|
||||
// }
|
||||
|
||||
TRACE("core_array_eq", tout << "new possible core "
|
||||
<< mk_pp(pm.mk_and(lits), m) << "\n";);
|
||||
|
||||
|
||||
pred_transformer &pt = lemma->get_pob()->pt();
|
||||
// -- check if it is consistent with the transition relation
|
||||
unsigned uses_level1;
|
||||
if (pt.check_inductive(lemma->level(), lits, uses_level1)) {
|
||||
TRACE("core_array_eq", tout << "Inductive!\n";);
|
||||
lemma->update_cube(lemma->get_pob(),lits);
|
||||
lemma->set_level(uses_level1);
|
||||
return;
|
||||
} else
|
||||
{ TRACE("core_array_eq", tout << "Not-Inductive!\n";);}
|
||||
}
|
||||
|
||||
void lemma_eq_generalizer::operator() (lemma_ref &lemma)
|
||||
{
|
||||
TRACE("core_eq", tout << "Transforming equivalence classes\n";);
|
||||
|
||||
ast_manager &m = m_ctx.get_ast_manager();
|
||||
expr_ref_vector core(m);
|
||||
core.append (lemma->get_cube());
|
||||
|
||||
bool dirty;
|
||||
expr_equiv_class eq_classes(m);
|
||||
factor_eqs(core, eq_classes);
|
||||
// create all possible equalities to allow for simple inductive generalization
|
||||
dirty = equiv_to_expr_full(eq_classes, core);
|
||||
if (dirty) {
|
||||
lemma->update_cube(lemma->get_pob(), core);
|
||||
}
|
||||
}
|
||||
};
|
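Aside (not part of the commit): the loop in lemma_bool_inductive_generalizer repeatedly tries to drop a literal and keeps the drop only if the weakened cube remains inductive. Below is a minimal standalone sketch of that loop; the still_inductive callback is a stand-in assumption, not Spacer's check_inductive.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Drop-a-literal generalization: greedily remove literals from a cube as long
// as the remaining cube still passes the supplied inductiveness check.
std::vector<std::string> generalize(std::vector<std::string> cube,
                                    const std::function<bool(const std::vector<std::string>&)>& still_inductive) {
    for (size_t i = 0; i < cube.size();) {
        std::vector<std::string> candidate = cube;
        candidate.erase(candidate.begin() + i);          // try dropping literal i
        if (!candidate.empty() && still_inductive(candidate)) {
            cube = candidate;                            // keep the shorter, more general cube
        } else {
            ++i;                                         // literal is needed; move on
        }
    }
    return cube;
}

int main() {
    // Toy check: pretend the cube stays inductive as long as it contains "x>0".
    auto check = [](const std::vector<std::string>& c) {
        for (const auto& lit : c) if (lit == "x>0") return true;
        return false;
    };
    std::vector<std::string> cube = {"x>0", "y<5", "z==1"};
    for (const auto& lit : generalize(cube, check)) std::cout << lit << "\n";   // prints: x>0
}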
99
src/muz/spacer/spacer_generalizers.h
Normal file
@ -0,0 +1,99 @@
|||
/*++
|
||||
Copyright (c) 2017 Microsoft Corporation and Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_generalizers.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Generalizer plugins.
|
||||
|
||||
Author:
|
||||
|
||||
Nikolaj Bjorner (nbjorner) 2011-11-22.
|
||||
Arie Gurfinkel
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_GENERALIZERS_H_
|
||||
#define _SPACER_GENERALIZERS_H_
|
||||
|
||||
#include "muz/spacer/spacer_context.h"
|
||||
#include "ast/arith_decl_plugin.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
// can be used to check whether produced core is really implied by
|
||||
// frame and therefore valid TODO: or negation?
|
||||
class lemma_sanity_checker : public lemma_generalizer {
|
||||
public:
|
||||
lemma_sanity_checker(context& ctx) : lemma_generalizer(ctx) {}
|
||||
virtual ~lemma_sanity_checker() {}
|
||||
virtual void operator()(lemma_ref &lemma);
|
||||
};
|
||||
|
||||
/**
|
||||
* Boolean inductive generalization by dropping literals
|
||||
*/
|
||||
class lemma_bool_inductive_generalizer : public lemma_generalizer {
|
||||
|
||||
struct stats {
|
||||
unsigned count;
|
||||
unsigned num_failures;
|
||||
stopwatch watch;
|
||||
stats() {reset();}
|
||||
void reset() {count = 0; num_failures = 0; watch.reset();}
|
||||
};
|
||||
|
||||
unsigned m_failure_limit;
|
||||
stats m_st;
|
||||
|
||||
public:
|
||||
lemma_bool_inductive_generalizer(context& ctx, unsigned failure_limit) :
|
||||
lemma_generalizer(ctx), m_failure_limit(failure_limit) {}
|
||||
virtual ~lemma_bool_inductive_generalizer() {}
|
||||
virtual void operator()(lemma_ref &lemma);
|
||||
|
||||
virtual void collect_statistics(statistics& st) const;
|
||||
virtual void reset_statistics() {m_st.reset();}
|
||||
};
|
||||
|
||||
class unsat_core_generalizer : public lemma_generalizer {
|
||||
struct stats {
|
||||
unsigned count;
|
||||
unsigned num_failures;
|
||||
stopwatch watch;
|
||||
stats() { reset(); }
|
||||
void reset() {count = 0; num_failures = 0; watch.reset();}
|
||||
};
|
||||
|
||||
stats m_st;
|
||||
public:
|
||||
unsat_core_generalizer(context &ctx) : lemma_generalizer(ctx) {}
|
||||
virtual ~unsat_core_generalizer() {}
|
||||
virtual void operator()(lemma_ref &lemma);
|
||||
|
||||
virtual void collect_statistics(statistics &st) const;
|
||||
virtual void reset_statistics() {m_st.reset();}
|
||||
};
|
||||
|
||||
class lemma_array_eq_generalizer : public lemma_generalizer {
|
||||
public:
|
||||
lemma_array_eq_generalizer(context &ctx) : lemma_generalizer(ctx) {}
|
||||
virtual ~lemma_array_eq_generalizer() {}
|
||||
virtual void operator()(lemma_ref &lemma);
|
||||
|
||||
};
|
||||
|
||||
class lemma_eq_generalizer : public lemma_generalizer {
|
||||
public:
|
||||
lemma_eq_generalizer(context &ctx) : lemma_generalizer(ctx) {}
|
||||
virtual ~lemma_eq_generalizer() {}
|
||||
virtual void operator()(lemma_ref &lemma);
|
||||
};
|
||||
|
||||
|
||||
};
|
||||
#endif
|
355
src/muz/spacer/spacer_itp_solver.cpp
Normal file
@ -0,0 +1,355 @@
|||
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_itp_solver.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
A solver that produces interpolated unsat cores
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#include"spacer_itp_solver.h"
|
||||
#include"ast.h"
|
||||
#include"spacer_util.h"
|
||||
#include"spacer_farkas_learner.h"
|
||||
#include"expr_replacer.h"
|
||||
#include "spacer_unsat_core_learner.h"
|
||||
#include "spacer_unsat_core_plugin.h"
|
||||
|
||||
namespace spacer {
|
||||
void itp_solver::push ()
|
||||
{
|
||||
m_defs.push_back (def_manager (*this));
|
||||
m_solver.push ();
|
||||
}
|
||||
|
||||
void itp_solver::pop (unsigned n)
|
||||
{
|
||||
m_solver.pop (n);
|
||||
unsigned lvl = m_defs.size ();
|
||||
SASSERT (n <= lvl);
|
||||
unsigned new_lvl = lvl-n;
|
||||
while (m_defs.size() > new_lvl) {
|
||||
m_num_proxies -= m_defs.back ().m_defs.size ();
|
||||
m_defs.pop_back ();
|
||||
}
|
||||
}
|
||||
|
||||
app* itp_solver::fresh_proxy ()
|
||||
{
|
||||
if (m_num_proxies == m_proxies.size()) {
|
||||
std::stringstream name;
|
||||
name << "spacer_proxy!" << m_proxies.size ();
|
||||
app_ref res(m);
|
||||
res = m.mk_const (symbol (name.str ().c_str ()),
|
||||
m.mk_bool_sort ());
|
||||
m_proxies.push_back (res);
|
||||
|
||||
// -- add the new proxy to proxy eliminator
|
||||
proof_ref pr(m);
|
||||
pr = m.mk_asserted (m.mk_true ());
|
||||
m_elim_proxies_sub.insert (res, m.mk_true (), pr);
|
||||
|
||||
}
|
||||
return m_proxies.get (m_num_proxies++);
|
||||
}
|
||||
|
||||
app* itp_solver::mk_proxy (expr *v)
|
||||
{
|
||||
{
|
||||
expr *e = v;
|
||||
m.is_not (v, e);
|
||||
if (is_uninterp_const(e)) { return to_app(v); }
|
||||
}
|
||||
|
||||
def_manager &def = m_defs.size () > 0 ? m_defs.back () : m_base_defs;
|
||||
return def.mk_proxy (v);
|
||||
}
|
||||
|
||||
bool itp_solver::mk_proxies (expr_ref_vector &v, unsigned from)
|
||||
{
|
||||
bool dirty = false;
|
||||
for (unsigned i = from, sz = v.size(); i < sz; ++i) {
|
||||
app *p = mk_proxy (v.get (i));
|
||||
dirty |= (v.get (i) != p);
|
||||
v[i] = p;
|
||||
}
|
||||
return dirty;
|
||||
}
|
||||
|
||||
void itp_solver::push_bg (expr *e)
|
||||
{
|
||||
if (m_assumptions.size () > m_first_assumption)
|
||||
{ m_assumptions.shrink(m_first_assumption); }
|
||||
m_assumptions.push_back (e);
|
||||
m_first_assumption = m_assumptions.size ();
|
||||
}
|
||||
|
||||
void itp_solver::pop_bg (unsigned n)
|
||||
{
|
||||
if (n == 0) { return; }
|
||||
|
||||
if (m_assumptions.size () > m_first_assumption)
|
||||
{ m_assumptions.shrink(m_first_assumption); }
|
||||
m_first_assumption = m_first_assumption > n ? m_first_assumption - n : 0;
|
||||
m_assumptions.shrink (m_first_assumption);
|
||||
}
|
||||
|
||||
unsigned itp_solver::get_num_bg () {return m_first_assumption;}
|
||||
|
||||
lbool itp_solver::check_sat (unsigned num_assumptions, expr * const *assumptions)
|
||||
{
|
||||
// -- remove any old assumptions
|
||||
if (m_assumptions.size () > m_first_assumption)
|
||||
{ m_assumptions.shrink(m_first_assumption); }
|
||||
|
||||
// -- replace theory literals in background assumptions with proxies
|
||||
mk_proxies (m_assumptions);
|
||||
// -- in case mk_proxies added new literals, they are all background
|
||||
m_first_assumption = m_assumptions.size ();
|
||||
|
||||
m_assumptions.append (num_assumptions, assumptions);
|
||||
m_is_proxied = mk_proxies (m_assumptions, m_first_assumption);
|
||||
|
||||
lbool res;
|
||||
res = m_solver.check_sat (m_assumptions.size (), m_assumptions.c_ptr ());
|
||||
set_status (res);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
app* itp_solver::def_manager::mk_proxy (expr *v)
|
||||
{
|
||||
app* r;
|
||||
if (m_expr2proxy.find(v, r)) { return r; }
|
||||
|
||||
ast_manager &m = m_parent.m;
|
||||
app_ref proxy(m);
|
||||
app_ref def(m);
|
||||
proxy = m_parent.fresh_proxy ();
|
||||
def = m.mk_or (m.mk_not (proxy), v);
|
||||
m_defs.push_back (def);
|
||||
m_expr2proxy.insert (v, proxy);
|
||||
m_proxy2def.insert (proxy, def);
|
||||
|
||||
m_parent.assert_expr (def.get ());
|
||||
return proxy;
|
||||
}
|
||||
|
||||
bool itp_solver::def_manager::is_proxy (app *k, app_ref &def)
|
||||
{
|
||||
app *r = NULL;
|
||||
bool found = m_proxy2def.find (k, r);
|
||||
def = r;
|
||||
return found;
|
||||
}
|
||||
|
||||
void itp_solver::def_manager::reset ()
|
||||
{
|
||||
m_expr2proxy.reset ();
|
||||
m_proxy2def.reset ();
|
||||
m_defs.reset ();
|
||||
}
|
||||
|
||||
bool itp_solver::def_manager::is_proxy_def (expr *v)
|
||||
{
|
||||
// XXX This might not be the most robust way to check
|
||||
return m_defs.contains (v);
|
||||
}
|
||||
|
||||
bool itp_solver::is_proxy(expr *e, app_ref &def)
|
||||
{
|
||||
if (!is_uninterp_const(e)) { return false; }
|
||||
|
||||
app *a = to_app (e);
|
||||
|
||||
for (int i = m_defs.size (); i > 0; --i)
|
||||
if (m_defs[i-1].is_proxy (a, def))
|
||||
{ return true; }
|
||||
|
||||
if (m_base_defs.is_proxy (a, def))
|
||||
{ return true; }
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void itp_solver::collect_statistics (statistics &st) const
|
||||
{
|
||||
m_solver.collect_statistics (st);
|
||||
st.update ("time.itp_solver.itp_core", m_itp_watch.get_seconds ());
|
||||
}
|
||||
|
||||
void itp_solver::reset_statistics ()
|
||||
{
|
||||
m_itp_watch.reset ();
|
||||
}
|
||||
|
||||
void itp_solver::get_unsat_core (ptr_vector<expr> &core)
|
||||
{
|
||||
m_solver.get_unsat_core (core);
|
||||
undo_proxies_in_core (core);
|
||||
}
|
||||
void itp_solver::undo_proxies_in_core (ptr_vector<expr> &r)
|
||||
{
|
||||
app_ref e(m);
|
||||
expr_fast_mark1 bg;
|
||||
for (unsigned i = 0; i < m_first_assumption; ++i)
|
||||
{ bg.mark(m_assumptions.get(i)); }
|
||||
|
||||
// expand proxies
|
||||
unsigned j = 0;
|
||||
for (unsigned i = 0, sz = r.size(); i < sz; ++i) {
|
||||
// skip background assumptions
|
||||
if (bg.is_marked(r[i])) { continue; }
|
||||
|
||||
// -- undo proxies, but only if they were introduced in check_sat
|
||||
if (m_is_proxied && is_proxy(r[i], e)) {
|
||||
SASSERT (m.is_or (e));
|
||||
r[j] = e->get_arg (1);
|
||||
} else if (i != j) { r[j] = r[i]; }
|
||||
j++;
|
||||
}
|
||||
r.shrink (j);
|
||||
}
|
||||
|
||||
void itp_solver::undo_proxies (expr_ref_vector &r)
|
||||
{
|
||||
app_ref e(m);
|
||||
// expand proxies
|
||||
for (unsigned i = 0, sz = r.size (); i < sz; ++i)
|
||||
if (is_proxy(r.get(i), e)) {
|
||||
SASSERT (m.is_or (e));
|
||||
r[i] = e->get_arg (1);
|
||||
}
|
||||
}
|
||||
|
||||
void itp_solver::get_unsat_core (expr_ref_vector &_core)
|
||||
{
|
||||
ptr_vector<expr> core;
|
||||
get_unsat_core (core);
|
||||
_core.append (core.size (), core.c_ptr ());
|
||||
}
|
||||
|
||||
void itp_solver::elim_proxies (expr_ref_vector &v)
|
||||
{
|
||||
expr_ref f = mk_and (v);
|
||||
scoped_ptr<expr_replacer> rep = mk_expr_simp_replacer (m);
|
||||
rep->set_substitution (&m_elim_proxies_sub);
|
||||
(*rep) (f);
|
||||
v.reset ();
|
||||
flatten_and (f, v);
|
||||
}
|
||||
|
||||
void itp_solver::get_itp_core (expr_ref_vector &core)
|
||||
{
|
||||
scoped_watch _t_ (m_itp_watch);
|
||||
|
||||
typedef obj_hashtable<expr> expr_set;
|
||||
expr_set B;
|
||||
for (unsigned i = m_first_assumption, sz = m_assumptions.size(); i < sz; ++i) {
|
||||
expr *a = m_assumptions.get (i);
|
||||
app_ref def(m);
|
||||
if (is_proxy(a, def)) { B.insert(def.get()); }
|
||||
B.insert (a);
|
||||
}
|
||||
|
||||
proof_ref pr(m);
|
||||
pr = get_proof ();
|
||||
|
||||
if (!m_new_unsat_core) {
|
||||
// old code
|
||||
farkas_learner learner_old;
|
||||
learner_old.set_split_literals(m_split_literals);
|
||||
|
||||
learner_old.get_lemmas (pr, B, core);
|
||||
elim_proxies (core);
|
||||
simplify_bounds (core); // XXX potentially redundant
|
||||
} else {
|
||||
// new code
|
||||
unsat_core_learner learner(m);
|
||||
|
||||
if (m_farkas_optimized) {
|
||||
if (true) // TODO: proper options
|
||||
{
|
||||
unsat_core_plugin_farkas_lemma_optimized* plugin_farkas_lemma_optimized = alloc(unsat_core_plugin_farkas_lemma_optimized, learner,m);
|
||||
learner.register_plugin(plugin_farkas_lemma_optimized);
|
||||
}
|
||||
else
|
||||
{
|
||||
unsat_core_plugin_farkas_lemma_bounded* plugin_farkas_lemma_bounded = alloc(unsat_core_plugin_farkas_lemma_bounded, learner,m);
|
||||
learner.register_plugin(plugin_farkas_lemma_bounded);
|
||||
}
|
||||
|
||||
} else {
|
||||
unsat_core_plugin_farkas_lemma* plugin_farkas_lemma = alloc(unsat_core_plugin_farkas_lemma, learner, m_split_literals, m_farkas_a_const);
|
||||
learner.register_plugin(plugin_farkas_lemma);
|
||||
}
|
||||
|
||||
if (m_minimize_unsat_core) {
|
||||
unsat_core_plugin_min_cut* plugin_min_cut = alloc(unsat_core_plugin_min_cut, learner, m);
|
||||
learner.register_plugin(plugin_min_cut);
|
||||
} else {
|
||||
unsat_core_plugin_lemma* plugin_lemma = alloc(unsat_core_plugin_lemma, learner);
|
||||
learner.register_plugin(plugin_lemma);
|
||||
}
|
||||
|
||||
learner.compute_unsat_core(pr, B, core);
|
||||
|
||||
elim_proxies (core);
|
||||
simplify_bounds (core); // XXX potentially redundant
|
||||
|
||||
// // debug
|
||||
// expr_ref_vector core2(m);
|
||||
// unsat_core_learner learner2(m);
|
||||
//
|
||||
// unsat_core_plugin_farkas_lemma* plugin_farkas_lemma2 = alloc(unsat_core_plugin_farkas_lemma, learner2, m_split_literals);
|
||||
// learner2.register_plugin(plugin_farkas_lemma2);
|
||||
// unsat_core_plugin_lemma* plugin_lemma2 = alloc(unsat_core_plugin_lemma, learner2);
|
||||
// learner2.register_plugin(plugin_lemma2);
|
||||
// learner2.compute_unsat_core(pr, B, core2);
|
||||
//
|
||||
// elim_proxies (core2);
|
||||
// simplify_bounds (core2);
|
||||
//
|
||||
// IF_VERBOSE(2,
|
||||
// verbose_stream () << "Itp Core:\n"
|
||||
// << mk_pp (mk_and (core), m) << "\n";);
|
||||
// IF_VERBOSE(2,
|
||||
// verbose_stream () << "Itp Core2:\n"
|
||||
// << mk_pp (mk_and (core2), m) << "\n";);
|
||||
//SASSERT(mk_and (core) == mk_and (core2));
|
||||
}
|
||||
|
||||
IF_VERBOSE(2,
|
||||
verbose_stream () << "Itp Core:\n"
|
||||
<< mk_pp (mk_and (core), m) << "\n";);
|
||||
|
||||
}
|
||||
|
||||
void itp_solver::refresh ()
|
||||
{
|
||||
// only refresh in non-pushed state
|
||||
SASSERT (m_defs.size () == 0);
|
||||
expr_ref_vector assertions (m);
|
||||
for (unsigned i = 0, e = m_solver.get_num_assertions(); i < e; ++i) {
|
||||
expr* a = m_solver.get_assertion (i);
|
||||
if (!m_base_defs.is_proxy_def(a)) { assertions.push_back(a); }
|
||||
|
||||
}
|
||||
m_base_defs.reset ();
|
||||
NOT_IMPLEMENTED_YET ();
|
||||
// solver interface does not have a reset method. need to introduce it somewhere.
|
||||
// m_solver.reset ();
|
||||
for (unsigned i = 0, e = assertions.size (); i < e; ++i)
|
||||
{ m_solver.assert_expr(assertions.get(i)); }
|
||||
}
|
||||
|
||||
}
|
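Aside (not part of the commit): the proxy machinery in itp_solver boils down to a standard trick: an arbitrary assumption formula f is replaced by a fresh Boolean p plus the definition clause (or (not p) f), so the query runs under the literal p and p is later mapped back to f. A self-contained toy illustration using plain strings follows; the names are illustrative, not Z3 APIs.

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy proxy manager: each formula gets a fresh Boolean name and a defining
// clause "(or (not proxy) formula)"; checking under the proxy literal is then
// equivalent to checking under the original formula.
struct ProxyManager {
    std::vector<std::string> clauses;          // definitional clauses to assert
    std::map<std::string, std::string> f2p;    // formula -> proxy literal
    unsigned counter = 0;

    std::string mk_proxy(const std::string& formula) {
        auto it = f2p.find(formula);
        if (it != f2p.end()) return it->second;        // reuse an existing proxy
        std::string p = "spacer_proxy!" + std::to_string(counter++);
        clauses.push_back("(or (not " + p + ") " + formula + ")");
        f2p[formula] = p;
        return p;
    }
};

int main() {
    ProxyManager pm;
    std::cout << pm.mk_proxy("(<= x 5)") << "\n";   // spacer_proxy!0
    std::cout << pm.mk_proxy("(> y 0)") << "\n";    // spacer_proxy!1
    for (const auto& c : pm.clauses) std::cout << c << "\n";
}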
177
src/muz/spacer/spacer_itp_solver.h
Normal file
@ -0,0 +1,177 @@
|||
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_itp_solver.h
|
||||
|
||||
Abstract:
|
||||
|
||||
A solver that produces interpolated unsat cores
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#ifndef SPACER_ITP_SOLVER_H_
|
||||
#define SPACER_ITP_SOLVER_H_
|
||||
|
||||
#include "solver.h"
|
||||
#include "expr_substitution.h"
|
||||
#include"stopwatch.h"
|
||||
namespace spacer {
|
||||
class itp_solver : public solver {
|
||||
private:
|
||||
struct def_manager {
|
||||
itp_solver &m_parent;
|
||||
obj_map<expr, app*> m_expr2proxy;
|
||||
obj_map<app, app*> m_proxy2def;
|
||||
|
||||
expr_ref_vector m_defs;
|
||||
|
||||
def_manager(itp_solver &parent) :
|
||||
m_parent(parent), m_defs(m_parent.m)
|
||||
{}
|
||||
|
||||
bool is_proxy(app *k, app_ref &v);
|
||||
app* mk_proxy(expr *v);
|
||||
void reset();
|
||||
bool is_proxy_def(expr *v);
|
||||
|
||||
};
|
||||
|
||||
friend struct def_manager;
|
||||
ast_manager &m;
|
||||
solver &m_solver;
|
||||
app_ref_vector m_proxies;
|
||||
unsigned m_num_proxies;
|
||||
vector<def_manager> m_defs;
|
||||
def_manager m_base_defs;
|
||||
expr_ref_vector m_assumptions;
|
||||
unsigned m_first_assumption;
|
||||
bool m_is_proxied;
|
||||
|
||||
stopwatch m_itp_watch;
|
||||
|
||||
expr_substitution m_elim_proxies_sub;
|
||||
bool m_split_literals;
|
||||
bool m_new_unsat_core;
|
||||
bool m_minimize_unsat_core;
|
||||
bool m_farkas_optimized;
|
||||
bool m_farkas_a_const;
|
||||
|
||||
bool is_proxy(expr *e, app_ref &def);
|
||||
void undo_proxies_in_core(ptr_vector<expr> &v);
|
||||
app* mk_proxy(expr *v);
|
||||
app* fresh_proxy();
|
||||
void elim_proxies(expr_ref_vector &v);
|
||||
public:
|
||||
itp_solver(solver &solver, bool new_unsat_core, bool minimize_unsat_core, bool farkas_optimized, bool farkas_a_const, bool split_literals = false) :
|
||||
m(solver.get_manager()),
|
||||
m_solver(solver),
|
||||
m_proxies(m),
|
||||
m_num_proxies(0),
|
||||
m_base_defs(*this),
|
||||
m_assumptions(m),
|
||||
m_first_assumption(0),
|
||||
m_is_proxied(false),
|
||||
m_elim_proxies_sub(m, false, true),
|
||||
m_split_literals(split_literals),
|
||||
m_new_unsat_core(new_unsat_core),
|
||||
m_minimize_unsat_core(minimize_unsat_core),
|
||||
m_farkas_optimized(farkas_optimized),
|
||||
m_farkas_a_const(farkas_a_const)
|
||||
{}
|
||||
|
||||
virtual ~itp_solver() {}
|
||||
|
||||
/* itp solver specific */
|
||||
virtual void get_unsat_core(expr_ref_vector &core);
|
||||
virtual void get_itp_core(expr_ref_vector &core);
|
||||
void set_split_literals(bool v) {m_split_literals = v;}
|
||||
bool mk_proxies(expr_ref_vector &v, unsigned from = 0);
|
||||
void undo_proxies(expr_ref_vector &v);
|
||||
|
||||
void push_bg(expr *e);
|
||||
void pop_bg(unsigned n);
|
||||
unsigned get_num_bg();
|
||||
|
||||
void get_full_unsat_core(ptr_vector<expr> &core)
|
||||
{m_solver.get_unsat_core(core);}
|
||||
|
||||
/* solver interface */
|
||||
|
||||
virtual solver* translate(ast_manager &m, params_ref const &p)
|
||||
{return m_solver.translate(m, p);}
|
||||
virtual void updt_params(params_ref const &p)
|
||||
{m_solver.updt_params(p);}
|
||||
virtual void collect_param_descrs(param_descrs &r)
|
||||
{m_solver.collect_param_descrs(r);}
|
||||
virtual void set_produce_models(bool f)
|
||||
{m_solver.set_produce_models(f);}
|
||||
virtual void assert_expr(expr *t)
|
||||
{m_solver.assert_expr(t);}
|
||||
|
||||
virtual void assert_expr(expr *t, expr *a)
|
||||
{NOT_IMPLEMENTED_YET();}
|
||||
|
||||
virtual void push();
|
||||
virtual void pop(unsigned n);
|
||||
virtual unsigned get_scope_level() const
|
||||
{return m_solver.get_scope_level();}
|
||||
|
||||
virtual lbool check_sat(unsigned num_assumptions, expr * const *assumptions);
|
||||
virtual void set_progress_callback(progress_callback *callback)
|
||||
{m_solver.set_progress_callback(callback);}
|
||||
virtual unsigned get_num_assertions() const
|
||||
{return m_solver.get_num_assertions();}
|
||||
virtual expr * get_assertion(unsigned idx) const
|
||||
{return m_solver.get_assertion(idx);}
|
||||
virtual unsigned get_num_assumptions() const
|
||||
{return m_solver.get_num_assumptions();}
|
||||
virtual expr * get_assumption(unsigned idx) const
|
||||
{return m_solver.get_assumption(idx);}
|
||||
virtual std::ostream &display(std::ostream &out) const
|
||||
{m_solver.display(out); return out;}
|
||||
|
||||
/* check_sat_result interface */
|
||||
|
||||
virtual void collect_statistics(statistics &st) const ;
|
||||
virtual void reset_statistics();
|
||||
virtual void get_unsat_core(ptr_vector<expr> &r);
|
||||
virtual void get_model(model_ref &m) {m_solver.get_model(m);}
|
||||
virtual proof *get_proof() {return m_solver.get_proof();}
|
||||
virtual std::string reason_unknown() const
|
||||
{return m_solver.reason_unknown();}
|
||||
virtual void set_reason_unknown(char const* msg)
|
||||
{m_solver.set_reason_unknown(msg);}
|
||||
virtual void get_labels(svector<symbol> &r)
|
||||
{m_solver.get_labels(r);}
|
||||
virtual ast_manager &get_manager() const {return m;}
|
||||
|
||||
virtual void refresh();
|
||||
|
||||
class scoped_mk_proxy {
|
||||
itp_solver &m_s;
|
||||
expr_ref_vector &m_v;
|
||||
public:
|
||||
scoped_mk_proxy(itp_solver &s, expr_ref_vector &v) : m_s(s), m_v(v)
|
||||
{m_s.mk_proxies(m_v);}
|
||||
~scoped_mk_proxy()
|
||||
{m_s.undo_proxies(m_v);}
|
||||
};
|
||||
|
||||
class scoped_bg {
|
||||
itp_solver &m_s;
|
||||
unsigned m_bg_sz;
|
||||
public:
|
||||
scoped_bg(itp_solver &s) : m_s(s), m_bg_sz(m_s.get_num_bg()) {}
|
||||
~scoped_bg()
|
||||
{if(m_s.get_num_bg() > m_bg_sz) { m_s.pop_bg(m_s.get_num_bg() - m_bg_sz); }}
|
||||
};
|
||||
};
|
||||
}
|
||||
#endif
|
170
src/muz/spacer/spacer_legacy_frames.cpp
Normal file
@ -0,0 +1,170 @@
|||
/*
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Legacy implementations of frames. To be removed.
|
||||
*/
|
||||
#include "spacer_context.h"
|
||||
#include <sstream>
|
||||
#include <iomanip>
|
||||
|
||||
#include "dl_util.h"
|
||||
#include "rewriter.h"
|
||||
#include "rewriter_def.h"
|
||||
#include "var_subst.h"
|
||||
#include "util.h"
|
||||
#include "spacer_prop_solver.h"
|
||||
#include "spacer_context.h"
|
||||
#include "spacer_generalizers.h"
|
||||
#include "for_each_expr.h"
|
||||
#include "dl_rule_set.h"
|
||||
#include "unit_subsumption_tactic.h"
|
||||
#include "model_smt2_pp.h"
|
||||
#include "dl_mk_rule_inliner.h"
|
||||
#include "ast_smt2_pp.h"
|
||||
#include "ast_ll_pp.h"
|
||||
#include "ast_util.h"
|
||||
#include "proof_checker.h"
|
||||
#include "smt_value_sort.h"
|
||||
#include "proof_utils.h"
|
||||
#include "scoped_proof.h"
|
||||
#include "spacer_qe_project.h"
|
||||
#include "blast_term_ite_tactic.h"
|
||||
|
||||
#include "timeit.h"
|
||||
#include "luby.h"
|
||||
#include "expr_safe_replace.h"
|
||||
#include "expr_abstract.h"
|
||||
#include "obj_equiv_class.h"
|
||||
|
||||
|
||||
namespace spacer {
|
||||
// ------------------
|
||||
// legacy_frames
|
||||
void pred_transformer::legacy_frames::simplify_formulas(tactic& tac,
|
||||
expr_ref_vector& v)
|
||||
{
|
||||
ast_manager &m = m_pt.get_ast_manager();
|
||||
goal_ref g(alloc(goal, m, false, false, false));
|
||||
for (unsigned j = 0; j < v.size(); ++j) { g->assert_expr(v[j].get()); }
|
||||
model_converter_ref mc;
|
||||
proof_converter_ref pc;
|
||||
expr_dependency_ref core(m);
|
||||
goal_ref_buffer result;
|
||||
tac(g, result, mc, pc, core);
|
||||
SASSERT(result.size() == 1);
|
||||
goal* r = result[0];
|
||||
v.reset();
|
||||
for (unsigned j = 0; j < r->size(); ++j) { v.push_back(r->form(j)); }
|
||||
}
|
||||
|
||||
void pred_transformer::legacy_frames::simplify_formulas()
|
||||
{
|
||||
ast_manager &m = m_pt.get_ast_manager();
|
||||
tactic_ref us = mk_unit_subsumption_tactic(m);
|
||||
simplify_formulas(*us, m_invariants);
|
||||
for (unsigned i = 0; i < m_levels.size(); ++i) {
|
||||
simplify_formulas(*us, m_levels[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void pred_transformer::legacy_frames::get_frame_geq_lemmas(unsigned lvl,
|
||||
expr_ref_vector &out)
|
||||
{
|
||||
get_frame_lemmas(infty_level(), out);
|
||||
for (unsigned i = lvl, sz = m_levels.size(); i < sz; ++i)
|
||||
{ get_frame_lemmas(i, out); }
|
||||
}
|
||||
|
||||
bool pred_transformer::legacy_frames::propagate_to_next_level(unsigned src_level)
|
||||
{
|
||||
|
||||
ast_manager &m = m_pt.get_ast_manager();
|
||||
if (m_levels.size() <= src_level) { return true; }
|
||||
if (m_levels [src_level].empty()) { return true; }
|
||||
|
||||
unsigned tgt_level = next_level(src_level);
|
||||
m_pt.ensure_level(next_level(tgt_level));
|
||||
|
||||
TRACE("spacer",
|
||||
tout << "propagating " << src_level << " to " << tgt_level;
|
||||
tout << " for relation " << m_pt.head()->get_name() << "\n";);
|
||||
|
||||
for (unsigned i = 0; i < m_levels[src_level].size();) {
|
||||
expr_ref_vector &src = m_levels[src_level];
|
||||
expr * curr = src[i].get();
|
||||
unsigned stored_lvl;
|
||||
VERIFY(m_prop2level.find(curr, stored_lvl));
|
||||
SASSERT(stored_lvl >= src_level);
|
||||
unsigned solver_level;
|
||||
if (stored_lvl > src_level) {
|
||||
TRACE("spacer", tout << "at level: " << stored_lvl << " " << mk_pp(curr, m) << "\n";);
|
||||
src[i] = src.back();
|
||||
src.pop_back();
|
||||
} else if (m_pt.is_invariant(tgt_level, curr, solver_level)) {
|
||||
// -- might invalidate src reference
|
||||
add_lemma(curr, solver_level);
|
||||
TRACE("spacer", tout << "is invariant: " << pp_level(solver_level) << " " << mk_pp(curr, m) << "\n";);
|
||||
// shadow higher-level src
|
||||
expr_ref_vector &src = m_levels[src_level];
|
||||
src[i] = src.back();
|
||||
src.pop_back();
|
||||
++m_pt.m_stats.m_num_propagations;
|
||||
} else {
|
||||
TRACE("spacer", tout << "not propagated: " << mk_pp(curr, m) << "\n";);
|
||||
++i;
|
||||
}
|
||||
}
|
||||
|
||||
CTRACE("spacer", m_levels[src_level].empty(),
|
||||
tout << "Fully propagated level "
|
||||
<< src_level << " of " << m_pt.head()->get_name() << "\n";);
|
||||
|
||||
return m_levels[src_level].empty();
|
||||
}
|
||||
|
||||
bool pred_transformer::legacy_frames::add_lemma(expr * lemma, unsigned lvl)
|
||||
{
|
||||
if (is_infty_level(lvl)) {
|
||||
if (!m_invariants.contains(lemma)) {
|
||||
m_invariants.push_back(lemma);
|
||||
m_prop2level.insert(lemma, lvl);
|
||||
//m_pt.add_lemma_core (lemma, lvl);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
unsigned old_level;
|
||||
if (!m_prop2level.find(lemma, old_level) || old_level < lvl) {
|
||||
m_levels[lvl].push_back(lemma);
|
||||
m_prop2level.insert(lemma, lvl);
|
||||
//m_pt.add_lemma_core (lemma, lvl);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void pred_transformer::legacy_frames::propagate_to_infinity(unsigned level)
|
||||
{
|
||||
TRACE("spacer", tout << "propagating to oo from lvl " << level
|
||||
<< " of " << m_pt.m_head->get_name() << "\n";);
|
||||
|
||||
if (m_levels.empty()) { return; }
|
||||
|
||||
for (unsigned i = m_levels.size(); i > level; --i) {
|
||||
expr_ref_vector &lemmas = m_levels [i - 1];
|
||||
for (unsigned j = 0; j < lemmas.size(); ++j)
|
||||
{ add_lemma(lemmas.get(j), infty_level()); }
|
||||
lemmas.reset();
|
||||
}
|
||||
}
|
||||
|
||||
void pred_transformer::legacy_frames::inherit_frames(legacy_frames& other)
|
||||
{
|
||||
|
||||
SASSERT(m_pt.m_head == other.m_pt.m_head);
|
||||
obj_map<expr, unsigned>::iterator it = other.m_prop2level.begin();
|
||||
obj_map<expr, unsigned>::iterator end = other.m_prop2level.end();
|
||||
for (; it != end; ++it) { add_lemma(it->m_key, it->m_value); }
|
||||
}
|
||||
}
|
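Aside (not part of the commit): propagate_to_next_level implements the usual IC3/PDR push: a lemma stored at level k moves to level k+1 when it is still invariant there, and an emptied source level signals convergence. A toy standalone sketch follows; the invariant_at_next callback is an assumption standing in for pt.is_invariant.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Toy frame propagation: push lemmas from levels[src] to levels[src+1] when
// they remain invariant at the next level; return true if the source level
// becomes empty (i.e. the two frames agree and the engine has converged).
bool propagate(std::vector<std::vector<std::string>>& levels, size_t src,
               const std::function<bool(const std::string&)>& invariant_at_next) {
    std::vector<std::string> kept;
    for (const auto& lemma : levels[src]) {
        if (invariant_at_next(lemma)) levels[src + 1].push_back(lemma);  // pushed forward
        else kept.push_back(lemma);                                      // stays at src
    }
    levels[src] = kept;
    return levels[src].empty();
}

int main() {
    std::vector<std::vector<std::string>> levels(3);
    levels[1] = {"x >= 0", "y < 10"};
    bool done = propagate(levels, 1, [](const std::string& l) { return l == "x >= 0"; });
    std::cout << (done ? "converged" : "not converged") << "\n";   // not converged
}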
47
src/muz/spacer/spacer_legacy_frames.h
Normal file
@ -0,0 +1,47 @@
/**++
Copyright (c) 2017 Arie Gurfinkel

Legacy implementations of frames. To be removed.

Notes: this file is included from the middle of spacer_context.h
*/

class legacy_frames
{
    pred_transformer &m_pt;

    /// level formulas
    vector<expr_ref_vector> m_levels;
    /// map property to level where it occurs.
    obj_map<expr, unsigned> m_prop2level;
    /// properties that are invariant.
    expr_ref_vector m_invariants;

    void simplify_formulas (tactic& tac, expr_ref_vector& v);

public:
    legacy_frames (pred_transformer &pt) :
        m_pt(pt), m_invariants (m_pt.get_ast_manager ()) {}
    pred_transformer& pt () const {return m_pt;}
    bool add_lemma (expr * lemma, unsigned level);
    void get_frame_lemmas (unsigned level, expr_ref_vector &out)
    {
        if(is_infty_level(level)) { out.append(m_invariants); }
        else if(level < m_levels.size()) { out.append(m_levels [level]); }
    }

    void get_frame_geq_lemmas (unsigned level, expr_ref_vector &out);
    void add_frame () {m_levels.push_back (expr_ref_vector (m_pt.get_ast_manager ()));}

    unsigned size () const {return m_levels.size ();}
    unsigned lemma_size () const {return m_prop2level.size ();}

    void propagate_to_infinity (unsigned level);
    bool propagate_to_next_level (unsigned level);

    void simplify_formulas ();

    void inherit_frames (legacy_frames& other);
};
116
src/muz/spacer/spacer_legacy_mbp.cpp
Normal file
@ -0,0 +1,116 @@
|||
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_legacy_mbp.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Legacy Model Based Projection. Used by Grigory Fedyukovich
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
Anvesh Komuravelli
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#include <sstream>
|
||||
#include "arith_simplifier_plugin.h"
|
||||
#include "array_decl_plugin.h"
|
||||
#include "ast_pp.h"
|
||||
#include "basic_simplifier_plugin.h"
|
||||
#include "bv_simplifier_plugin.h"
|
||||
#include "bool_rewriter.h"
|
||||
#include "dl_util.h"
|
||||
#include "for_each_expr.h"
|
||||
#include "smt_params.h"
|
||||
#include "model.h"
|
||||
#include "ref_vector.h"
|
||||
#include "rewriter.h"
|
||||
#include "rewriter_def.h"
|
||||
#include "util.h"
|
||||
#include "spacer_manager.h"
|
||||
#include "spacer_util.h"
|
||||
#include "arith_decl_plugin.h"
|
||||
#include "expr_replacer.h"
|
||||
#include "model_smt2_pp.h"
|
||||
#include "scoped_proof.h"
|
||||
#include "qe_lite.h"
|
||||
#include "spacer_qe_project.h"
|
||||
#include "model_pp.h"
|
||||
#include "expr_safe_replace.h"
|
||||
|
||||
#include "datatype_decl_plugin.h"
|
||||
#include "bv_decl_plugin.h"
|
||||
|
||||
#include "spacer_legacy_mev.h"
|
||||
|
||||
namespace spacer {
|
||||
void qe_project(ast_manager& m, app_ref_vector& vars, expr_ref& fml, model_ref& M, expr_map& map)
|
||||
{
|
||||
th_rewriter rw(m);
|
||||
// qe-lite; TODO: use qe_lite aggressively
|
||||
params_ref p;
|
||||
qe_lite qe(m, p, true);
|
||||
qe(vars, fml);
|
||||
rw(fml);
|
||||
|
||||
TRACE("spacer",
|
||||
tout << "After qe_lite:\n";
|
||||
tout << mk_pp(fml, m) << "\n";
|
||||
tout << "Vars:\n";
|
||||
for (unsigned i = 0; i < vars.size(); ++i) {
|
||||
tout << mk_pp(vars.get(i), m) << "\n";
|
||||
}
|
||||
);
|
||||
|
||||
// substitute model values for booleans and
|
||||
// use LW projection for arithmetic variables
|
||||
if (!vars.empty()) {
|
||||
app_ref_vector arith_vars(m);
|
||||
expr_substitution sub(m);
|
||||
proof_ref pr(m.mk_asserted(m.mk_true()), m);
|
||||
expr_ref bval(m);
|
||||
for (unsigned i = 0; i < vars.size(); i++) {
|
||||
if (m.is_bool(vars.get(i))) {
|
||||
// obtain the interpretation of the ith var using model completion
|
||||
VERIFY(M->eval(vars.get(i), bval, true));
|
||||
sub.insert(vars.get(i), bval, pr);
|
||||
} else {
|
||||
arith_vars.push_back(vars.get(i));
|
||||
}
|
||||
}
|
||||
if (!sub.empty()) {
|
||||
scoped_ptr<expr_replacer> rep = mk_expr_simp_replacer(m);
|
||||
rep->set_substitution(&sub);
|
||||
(*rep)(fml);
|
||||
rw(fml);
|
||||
TRACE("spacer",
|
||||
tout << "Projected Boolean vars:\n" << mk_pp(fml, m) << "\n";
|
||||
);
|
||||
}
|
||||
// model based projection
|
||||
if (!arith_vars.empty()) {
|
||||
TRACE("spacer",
|
||||
tout << "Arith vars:\n";
|
||||
for (unsigned i = 0; i < arith_vars.size(); ++i) {
|
||||
tout << mk_pp(arith_vars.get(i), m) << "\n";
|
||||
}
|
||||
);
|
||||
{
|
||||
scoped_no_proof _sp(m);
|
||||
qe::arith_project(*M, arith_vars, fml, map);
|
||||
}
|
||||
SASSERT(arith_vars.empty());
|
||||
TRACE("spacer",
|
||||
tout << "Projected arith vars:\n" << mk_pp(fml, m) << "\n";
|
||||
);
|
||||
}
|
||||
SASSERT(M->eval(fml, bval, true) && m.is_true(bval)); // M |= fml
|
||||
vars.reset();
|
||||
vars.append(arith_vars);
|
||||
}
|
||||
}
|
||||
}
|
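Aside (not part of the commit): qe_project first substitutes model values for Boolean variables and only then hands the remaining arithmetic variables to model-based projection. A toy illustration of the Boolean substitution step follows; the formula and model are made up for the example.

#include <iostream>
#include <map>
#include <string>

// Toy illustration of the Boolean part of qe_project: Boolean variables are
// eliminated by substituting their model value, after which only the
// arithmetic residue remains for model-based projection.
int main() {
    // fml := (b && x > 0) || (!b && x < 0), model: b -> true, x -> 3
    std::map<std::string, bool> bool_model = {{"b", true}};
    bool b = bool_model["b"];
    // Substituting b = true and simplifying leaves the arithmetic residue
    // "x > 0", which the arithmetic projection step would then process.
    std::string residue = b ? "x > 0" : "x < 0";
    std::cout << "after Boolean substitution: " << residue << "\n";
}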
837
src/muz/spacer/spacer_legacy_mev.cpp
Normal file
@ -0,0 +1,837 @@
|||
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Deprecated implementation of model evaluator. To be removed.
|
||||
*/
|
||||
|
||||
#include <sstream>
|
||||
#include "arith_simplifier_plugin.h"
|
||||
#include "array_decl_plugin.h"
|
||||
#include "ast_pp.h"
|
||||
#include "basic_simplifier_plugin.h"
|
||||
#include "bv_simplifier_plugin.h"
|
||||
#include "bool_rewriter.h"
|
||||
#include "dl_util.h"
|
||||
#include "for_each_expr.h"
|
||||
#include "smt_params.h"
|
||||
#include "model.h"
|
||||
#include "ref_vector.h"
|
||||
#include "rewriter.h"
|
||||
#include "rewriter_def.h"
|
||||
#include "util.h"
|
||||
#include "spacer_manager.h"
|
||||
#include "spacer_legacy_mev.h"
|
||||
#include "spacer_util.h"
|
||||
#include "arith_decl_plugin.h"
|
||||
#include "expr_replacer.h"
|
||||
#include "model_smt2_pp.h"
|
||||
#include "scoped_proof.h"
|
||||
#include "qe_lite.h"
|
||||
#include "spacer_qe_project.h"
|
||||
#include "model_pp.h"
|
||||
#include "expr_safe_replace.h"
|
||||
|
||||
#include "datatype_decl_plugin.h"
|
||||
#include "bv_decl_plugin.h"
|
||||
|
||||
namespace old {
|
||||
|
||||
/////////////////////////
|
||||
// model_evaluator
|
||||
//
|
||||
|
||||
|
||||
void model_evaluator::assign_value(expr* e, expr* val)
|
||||
{
|
||||
rational r;
|
||||
if (m.is_true(val)) {
|
||||
set_true(e);
|
||||
} else if (m.is_false(val)) {
|
||||
set_false(e);
|
||||
} else if (m_arith.is_numeral(val, r)) {
|
||||
set_number(e, r);
|
||||
} else if (m.is_value(val)) {
|
||||
set_value(e, val);
|
||||
} else {
|
||||
IF_VERBOSE(3, verbose_stream() << "Not evaluated " << mk_pp(e, m) << "\n";);
|
||||
TRACE("old_spacer", tout << "Variable is not tracked: " << mk_pp(e, m) << "\n";);
|
||||
set_x(e);
|
||||
}
|
||||
}
|
||||
|
||||
void model_evaluator::setup_model(const model_ref& model)
|
||||
{
|
||||
m_numbers.reset();
|
||||
m_values.reset();
|
||||
m_model = model.get();
|
||||
rational r;
|
||||
unsigned sz = model->get_num_constants();
|
||||
for (unsigned i = 0; i < sz; i++) {
|
||||
func_decl * d = model->get_constant(i);
|
||||
expr* val = model->get_const_interp(d);
|
||||
expr* e = m.mk_const(d);
|
||||
m_refs.push_back(e);
|
||||
assign_value(e, val);
|
||||
}
|
||||
}
|
||||
|
||||
void model_evaluator::reset()
|
||||
{
|
||||
m1.reset();
|
||||
m2.reset();
|
||||
m_values.reset();
|
||||
m_visited.reset();
|
||||
m_numbers.reset();
|
||||
m_refs.reset();
|
||||
m_model = 0;
|
||||
}
|
||||
|
||||
|
||||
void model_evaluator::minimize_literals(ptr_vector<expr> const& formulas,
|
||||
const model_ref& mdl, expr_ref_vector& result)
|
||||
{
|
||||
|
||||
TRACE("old_spacer",
|
||||
tout << "formulas:\n";
|
||||
for (unsigned i = 0; i < formulas.size(); ++i) tout << mk_pp(formulas[i], m) << "\n";
|
||||
);
|
||||
|
||||
expr_ref tmp(m);
|
||||
ptr_vector<expr> tocollect;
|
||||
|
||||
setup_model(mdl);
|
||||
collect(formulas, tocollect);
|
||||
for (unsigned i = 0; i < tocollect.size(); ++i) {
|
||||
expr* e = tocollect[i];
|
||||
expr* e1, *e2;
|
||||
SASSERT(m.is_bool(e));
|
||||
SASSERT(is_true(e) || is_false(e));
|
||||
if (is_true(e)) {
|
||||
result.push_back(e);
|
||||
}
|
||||
// hack to break disequalities for arithmetic variables.
|
||||
else if (m.is_eq(e, e1, e2) && m_arith.is_int_real(e1)) {
|
||||
if (get_number(e1) < get_number(e2)) {
|
||||
result.push_back(m_arith.mk_lt(e1, e2));
|
||||
} else {
|
||||
result.push_back(m_arith.mk_lt(e2, e1));
|
||||
}
|
||||
} else {
|
||||
result.push_back(m.mk_not(e));
|
||||
}
|
||||
}
|
||||
reset();
|
||||
TRACE("old_spacer",
|
||||
tout << "minimized model:\n";
|
||||
for (unsigned i = 0; i < result.size(); ++i) tout << mk_pp(result[i].get(), m) << "\n";
|
||||
);
|
||||
}
|
||||
|
||||
void model_evaluator::process_formula(app* e, ptr_vector<expr>& todo, ptr_vector<expr>& tocollect)
|
||||
{
|
||||
SASSERT(m.is_bool(e));
|
||||
SASSERT(is_true(e) || is_false(e));
|
||||
unsigned v = is_true(e);
|
||||
unsigned sz = e->get_num_args();
|
||||
expr* const* args = e->get_args();
|
||||
if (e->get_family_id() == m.get_basic_family_id()) {
|
||||
switch (e->get_decl_kind()) {
|
||||
case OP_TRUE:
|
||||
break;
|
||||
case OP_FALSE:
|
||||
break;
|
||||
case OP_EQ:
|
||||
case OP_IFF:
|
||||
if (args[0] == args[1]) {
|
||||
SASSERT(v);
|
||||
// no-op
|
||||
} else if (m.is_bool(args[0])) {
|
||||
todo.append(sz, args);
|
||||
} else {
|
||||
tocollect.push_back(e);
|
||||
}
|
||||
break;
|
||||
case OP_DISTINCT:
|
||||
tocollect.push_back(e);
|
||||
break;
|
||||
case OP_ITE:
|
||||
if (args[1] == args[2]) {
|
||||
tocollect.push_back(args[1]);
|
||||
} else if (is_true(args[1]) && is_true(args[2])) {
|
||||
todo.append(2, args + 1);
|
||||
} else if (is_false(args[1]) && is_false(args[2])) {
|
||||
todo.append(2, args + 1);
|
||||
} else if (is_true(args[0])) {
|
||||
todo.append(2, args);
|
||||
} else {
|
||||
SASSERT(is_false(args[0]));
|
||||
todo.push_back(args[0]);
|
||||
todo.push_back(args[2]);
|
||||
}
|
||||
break;
|
||||
case OP_AND:
|
||||
if (v) {
|
||||
todo.append(sz, args);
|
||||
} else {
|
||||
unsigned i = 0;
|
||||
for (; i < sz && !is_false(args[i]); ++i) ;
|
||||
if (i == sz) {
|
||||
fatal_error(1);
|
||||
}
|
||||
VERIFY(i < sz);
|
||||
todo.push_back(args[i]);
|
||||
}
|
||||
break;
|
||||
case OP_OR:
|
||||
if (v) {
|
||||
unsigned i = 0;
|
||||
for (; i < sz && !is_true(args[i]); ++i) ;
|
||||
if (i == sz) {
|
||||
fatal_error(1);
|
||||
}
|
||||
VERIFY(i < sz);
|
||||
todo.push_back(args[i]);
|
||||
} else {
|
||||
todo.append(sz, args);
|
||||
}
|
||||
break;
|
||||
case OP_XOR:
|
||||
case OP_NOT:
|
||||
todo.append(sz, args);
|
||||
break;
|
||||
case OP_IMPLIES:
|
||||
if (v) {
|
||||
if (is_true(args[1])) {
|
||||
todo.push_back(args[1]);
|
||||
} else if (is_false(args[0])) {
|
||||
todo.push_back(args[0]);
|
||||
} else {
|
||||
IF_VERBOSE(0, verbose_stream() << "Term not handled " << mk_pp(e, m) << "\n";);
|
||||
UNREACHABLE();
|
||||
}
|
||||
} else {
|
||||
todo.append(sz, args);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
IF_VERBOSE(0, verbose_stream() << "Term not handled " << mk_pp(e, m) << "\n";);
|
||||
UNREACHABLE();
|
||||
}
|
||||
} else {
|
||||
tocollect.push_back(e);
|
||||
}
|
||||
}
|
||||
|
||||
void model_evaluator::collect(ptr_vector<expr> const& formulas, ptr_vector<expr>& tocollect)
|
||||
{
|
||||
ptr_vector<expr> todo;
|
||||
todo.append(formulas);
|
||||
m_visited.reset();
|
||||
|
||||
VERIFY(check_model(formulas));
|
||||
|
||||
while (!todo.empty()) {
|
||||
app* e = to_app(todo.back());
|
||||
todo.pop_back();
|
||||
if (!m_visited.is_marked(e)) {
|
||||
process_formula(e, todo, tocollect);
|
||||
m_visited.mark(e, true);
|
||||
}
|
||||
}
|
||||
m_visited.reset();
|
||||
}
|
||||
|
||||
void model_evaluator::eval_arith(app* e)
|
||||
{
|
||||
rational r, r2;
|
||||
|
||||
#define ARG1 e->get_arg(0)
|
||||
#define ARG2 e->get_arg(1)
|
||||
|
||||
unsigned arity = e->get_num_args();
|
||||
for (unsigned i = 0; i < arity; ++i) {
|
||||
expr* arg = e->get_arg(i);
|
||||
if (is_x(arg)) {
|
||||
set_x(e);
|
||||
return;
|
||||
}
|
||||
SASSERT(!is_unknown(arg));
|
||||
}
|
||||
switch (e->get_decl_kind()) {
|
||||
case OP_NUM:
|
||||
VERIFY(m_arith.is_numeral(e, r));
|
||||
set_number(e, r);
|
||||
break;
|
||||
case OP_IRRATIONAL_ALGEBRAIC_NUM:
|
||||
set_x(e);
|
||||
break;
|
||||
case OP_LE:
|
||||
set_bool(e, get_number(ARG1) <= get_number(ARG2));
|
||||
break;
|
||||
case OP_GE:
|
||||
set_bool(e, get_number(ARG1) >= get_number(ARG2));
|
||||
break;
|
||||
case OP_LT:
|
||||
set_bool(e, get_number(ARG1) < get_number(ARG2));
|
||||
break;
|
||||
case OP_GT:
|
||||
set_bool(e, get_number(ARG1) > get_number(ARG2));
|
||||
break;
|
||||
case OP_ADD:
|
||||
r = rational::zero();
|
||||
for (unsigned i = 0; i < arity; ++i) {
|
||||
r += get_number(e->get_arg(i));
|
||||
}
|
||||
set_number(e, r);
|
||||
break;
|
||||
case OP_SUB:
|
||||
r = get_number(e->get_arg(0));
|
||||
for (unsigned i = 1; i < arity; ++i) {
|
||||
r -= get_number(e->get_arg(i));
|
||||
}
|
||||
set_number(e, r);
|
||||
break;
|
||||
case OP_UMINUS:
|
||||
SASSERT(arity == 1);
|
||||
set_number(e, -get_number(e->get_arg(0)));
|
||||
break;
|
||||
case OP_MUL:
|
||||
r = rational::one();
|
||||
for (unsigned i = 0; i < arity; ++i) {
|
||||
r *= get_number(e->get_arg(i));
|
||||
}
|
||||
set_number(e, r);
|
||||
break;
|
||||
case OP_DIV:
|
||||
SASSERT(arity == 2);
|
||||
r = get_number(ARG2);
|
||||
if (r.is_zero()) {
|
||||
set_x(e);
|
||||
} else {
|
||||
set_number(e, get_number(ARG1) / r);
|
||||
}
|
||||
break;
|
||||
case OP_IDIV:
|
||||
SASSERT(arity == 2);
|
||||
r = get_number(ARG2);
|
||||
if (r.is_zero()) {
|
||||
set_x(e);
|
||||
} else {
|
||||
set_number(e, div(get_number(ARG1), r));
|
||||
}
|
||||
break;
|
||||
case OP_REM:
|
||||
// rem(v1,v2) = if v2 >= 0 then mod(v1,v2) else -mod(v1,v2)
|
||||
SASSERT(arity == 2);
|
||||
r = get_number(ARG2);
|
||||
if (r.is_zero()) {
|
||||
set_x(e);
|
||||
} else {
|
||||
r2 = mod(get_number(ARG1), r);
|
||||
if (r.is_neg()) { r2.neg(); }
|
||||
set_number(e, r2);
|
||||
}
|
||||
break;
|
||||
case OP_MOD:
|
||||
SASSERT(arity == 2);
|
||||
r = get_number(ARG2);
|
||||
if (r.is_zero()) {
|
||||
set_x(e);
|
||||
} else {
|
||||
set_number(e, mod(get_number(ARG1), r));
|
||||
}
|
||||
break;
|
||||
case OP_TO_REAL:
|
||||
SASSERT(arity == 1);
|
||||
set_number(e, get_number(ARG1));
|
||||
break;
|
||||
case OP_TO_INT:
|
||||
SASSERT(arity == 1);
|
||||
set_number(e, floor(get_number(ARG1)));
|
||||
break;
|
||||
case OP_IS_INT:
|
||||
SASSERT(arity == 1);
|
||||
set_bool(e, get_number(ARG1).is_int());
|
||||
break;
|
||||
case OP_POWER:
|
||||
set_x(e);
|
||||
break;
|
||||
default:
|
||||
IF_VERBOSE(0, verbose_stream() << "Term not handled " << mk_pp(e, m) << "\n";);
|
||||
UNREACHABLE();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void model_evaluator::inherit_value(expr* e, expr* v)
|
||||
{
|
||||
expr* w;
|
||||
SASSERT(!is_unknown(v));
|
||||
SASSERT(m.get_sort(e) == m.get_sort(v));
|
||||
if (is_x(v)) {
|
||||
set_x(e);
|
||||
} else if (m.is_bool(e)) {
|
||||
SASSERT(m.is_bool(v));
|
||||
if (is_true(v)) { set_true(e); }
|
||||
else if (is_false(v)) { set_false(e); }
|
||||
else {
|
||||
TRACE("old_spacer", tout << "not inherited:\n" << mk_pp(e, m) << "\n" << mk_pp(v, m) << "\n";);
|
||||
set_x(e);
|
||||
}
|
||||
} else if (m_arith.is_int_real(e)) {
|
||||
set_number(e, get_number(v));
|
||||
} else if (m.is_value(v)) {
|
||||
set_value(e, v);
|
||||
} else if (m_values.find(v, w)) {
|
||||
set_value(e, w);
|
||||
} else {
|
||||
TRACE("old_spacer", tout << "not inherited:\n" << mk_pp(e, m) << "\n" << mk_pp(v, m) << "\n";);
|
||||
set_x(e);
|
||||
}
|
||||
}
|
||||
|
||||
void model_evaluator::eval_exprs(expr_ref_vector& es)
|
||||
{
|
||||
model_ref mr(m_model);
|
||||
for (unsigned j = 0; j < es.size(); ++j) {
|
||||
if (m_array.is_as_array(es[j].get())) {
|
||||
es[j] = eval(mr, es[j].get());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool model_evaluator::extract_array_func_interp(expr* a, vector<expr_ref_vector>& stores, expr_ref& else_case)
|
||||
{
|
||||
SASSERT(m_array.is_array(a));
|
||||
|
||||
TRACE("old_spacer", tout << mk_pp(a, m) << "\n";);
|
||||
while (m_array.is_store(a)) {
|
||||
expr_ref_vector store(m);
|
||||
store.append(to_app(a)->get_num_args() - 1, to_app(a)->get_args() + 1);
|
||||
eval_exprs(store);
|
||||
stores.push_back(store);
|
||||
a = to_app(a)->get_arg(0);
|
||||
}
|
||||
|
||||
if (m_array.is_const(a)) {
|
||||
else_case = to_app(a)->get_arg(0);
|
||||
return true;
|
||||
}
|
||||
|
||||
while (m_array.is_as_array(a)) {
|
||||
func_decl* f = m_array.get_as_array_func_decl(to_app(a));
|
||||
func_interp* g = m_model->get_func_interp(f);
|
||||
unsigned sz = g->num_entries();
|
||||
unsigned arity = f->get_arity();
|
||||
for (unsigned i = 0; i < sz; ++i) {
|
||||
expr_ref_vector store(m);
|
||||
func_entry const* fe = g->get_entry(i);
|
||||
store.append(arity, fe->get_args());
|
||||
store.push_back(fe->get_result());
|
||||
for (unsigned j = 0; j < store.size(); ++j) {
|
||||
if (!is_ground(store[j].get())) {
|
||||
TRACE("old_spacer", tout << "could not extract array interpretation: " << mk_pp(a, m) << "\n" << mk_pp(store[j].get(), m) << "\n";);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
eval_exprs(store);
|
||||
stores.push_back(store);
|
||||
}
|
||||
else_case = g->get_else();
|
||||
if (!else_case) {
|
||||
TRACE("old_spacer", tout << "no else case " << mk_pp(a, m) << "\n";);
|
||||
return false;
|
||||
}
|
||||
if (!is_ground(else_case)) {
|
||||
TRACE("old_spacer", tout << "non-ground else case " << mk_pp(a, m) << "\n" << mk_pp(else_case, m) << "\n";);
|
||||
return false;
|
||||
}
|
||||
if (m_array.is_as_array(else_case)) {
|
||||
model_ref mr(m_model);
|
||||
else_case = eval(mr, else_case);
|
||||
}
|
||||
TRACE("old_spacer", tout << "else case: " << mk_pp(else_case, m) << "\n";);
|
||||
return true;
|
||||
}
|
||||
TRACE("old_spacer", tout << "no translation: " << mk_pp(a, m) << "\n";);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
best effort evaluator of extensional array equality.
|
||||
*/
|
||||
void model_evaluator::eval_array_eq(app* e, expr* arg1, expr* arg2)
|
||||
{
|
||||
TRACE("old_spacer", tout << "array equality: " << mk_pp(e, m) << "\n";);
|
||||
expr_ref v1(m), v2(m);
|
||||
m_model->eval(arg1, v1);
|
||||
m_model->eval(arg2, v2);
|
||||
if (v1 == v2) {
|
||||
set_true(e);
|
||||
return;
|
||||
}
|
||||
sort* s = m.get_sort(arg1);
|
||||
sort* r = get_array_range(s);
|
||||
// give up evaluating finite domain/range arrays
|
||||
if (!r->is_infinite() && !r->is_very_big() && !s->is_infinite() && !s->is_very_big()) {
|
||||
TRACE("old_spacer", tout << "equality is unknown: " << mk_pp(e, m) << "\n";);
|
||||
set_x(e);
|
||||
return;
|
||||
}
|
||||
vector<expr_ref_vector> store;
|
||||
expr_ref else1(m), else2(m);
|
||||
if (!extract_array_func_interp(v1, store, else1) ||
|
||||
!extract_array_func_interp(v2, store, else2)) {
|
||||
TRACE("old_spacer", tout << "equality is unknown: " << mk_pp(e, m) << "\n";);
|
||||
set_x(e);
|
||||
return;
|
||||
}
|
||||
|
||||
if (else1 != else2) {
|
||||
if (m.is_value(else1) && m.is_value(else2)) {
|
||||
TRACE("old_spacer", tout
|
||||
<< "defaults are different: " << mk_pp(e, m) << " "
|
||||
<< mk_pp(else1, m) << " " << mk_pp(else2, m) << "\n";);
|
||||
set_false(e);
|
||||
} else if (m_array.is_array(else1)) {
|
||||
eval_array_eq(e, else1, else2);
|
||||
} else {
|
||||
TRACE("old_spacer", tout << "equality is unknown: " << mk_pp(e, m) << "\n";);
|
||||
set_x(e);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
expr_ref s1(m), s2(m), w1(m), w2(m);
|
||||
expr_ref_vector args1(m), args2(m);
|
||||
args1.push_back(v1);
|
||||
args2.push_back(v2);
|
||||
for (unsigned i = 0; i < store.size(); ++i) {
|
||||
args1.resize(1);
|
||||
args2.resize(1);
|
||||
args1.append(store[i].size() - 1, store[i].c_ptr());
|
||||
args2.append(store[i].size() - 1, store[i].c_ptr());
|
||||
s1 = m_array.mk_select(args1.size(), args1.c_ptr());
|
||||
s2 = m_array.mk_select(args2.size(), args2.c_ptr());
|
||||
m_model->eval(s1, w1);
|
||||
m_model->eval(s2, w2);
|
||||
if (w1 == w2) {
|
||||
continue;
|
||||
}
|
||||
if (m.is_value(w1) && m.is_value(w2)) {
|
||||
TRACE("old_spacer", tout << "Equality evaluation: " << mk_pp(e, m) << "\n";
|
||||
tout << mk_pp(s1, m) << " |-> " << mk_pp(w1, m) << "\n";
|
||||
tout << mk_pp(s2, m) << " |-> " << mk_pp(w2, m) << "\n";);
|
||||
set_false(e);
|
||||
} else if (m_array.is_array(w1)) {
|
||||
eval_array_eq(e, w1, w2);
|
||||
if (is_true(e)) {
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
TRACE("old_spacer", tout << "equality is unknown: " << mk_pp(e, m) << "\n";);
|
||||
set_x(e);
|
||||
}
|
||||
return;
|
||||
}
|
||||
set_true(e);
|
||||
}
|
||||
|
||||
void model_evaluator::eval_eq(app* e, expr* arg1, expr* arg2)
|
||||
{
|
||||
if (arg1 == arg2) {
|
||||
set_true(e);
|
||||
} else if (m_array.is_array(arg1)) {
|
||||
eval_array_eq(e, arg1, arg2);
|
||||
} else if (is_x(arg1) || is_x(arg2)) {
|
||||
set_x(e);
|
||||
} else if (m.is_bool(arg1)) {
|
||||
bool val = is_true(arg1) == is_true(arg2);
|
||||
SASSERT(val == (is_false(arg1) == is_false(arg2)));
|
||||
if (val) {
|
||||
set_true(e);
|
||||
} else {
|
||||
set_false(e);
|
||||
}
|
||||
} else if (m_arith.is_int_real(arg1)) {
|
||||
set_bool(e, get_number(arg1) == get_number(arg2));
|
||||
} else {
|
||||
expr* e1 = get_value(arg1);
|
||||
expr* e2 = get_value(arg2);
|
||||
if (m.is_value(e1) && m.is_value(e2)) {
|
||||
set_bool(e, e1 == e2);
|
||||
} else if (e1 == e2) {
|
||||
set_bool(e, true);
|
||||
} else {
|
||||
TRACE("old_spacer", tout << "not value equal:\n" << mk_pp(e1, m) << "\n" << mk_pp(e2, m) << "\n";);
|
||||
set_x(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void model_evaluator::eval_basic(app* e)
|
||||
{
|
||||
expr* arg1, *arg2;
|
||||
expr *argCond, *argThen, *argElse, *arg;
|
||||
bool has_x = false;
|
||||
unsigned arity = e->get_num_args();
|
||||
switch (e->get_decl_kind()) {
|
||||
case OP_AND:
|
||||
for (unsigned j = 0; j < arity; ++j) {
|
||||
expr * arg = e->get_arg(j);
|
||||
if (is_false(arg)) {
|
||||
set_false(e);
|
||||
return;
|
||||
} else if (is_x(arg)) {
|
||||
has_x = true;
|
||||
} else {
|
||||
SASSERT(is_true(arg));
|
||||
}
|
||||
}
|
||||
if (has_x) {
|
||||
set_x(e);
|
||||
} else {
|
||||
set_true(e);
|
||||
}
|
||||
break;
|
||||
case OP_OR:
|
||||
for (unsigned j = 0; j < arity; ++j) {
|
||||
expr * arg = e->get_arg(j);
|
||||
if (is_true(arg)) {
|
||||
set_true(e);
|
||||
return;
|
||||
} else if (is_x(arg)) {
|
||||
has_x = true;
|
||||
} else {
|
||||
SASSERT(is_false(arg));
|
||||
}
|
||||
}
|
||||
if (has_x) {
|
||||
set_x(e);
|
||||
} else {
|
||||
set_false(e);
|
||||
}
|
||||
break;
|
||||
case OP_NOT:
|
||||
VERIFY(m.is_not(e, arg));
|
||||
if (is_true(arg)) {
|
||||
set_false(e);
|
||||
} else if (is_false(arg)) {
|
||||
set_true(e);
|
||||
} else {
|
||||
SASSERT(is_x(arg));
|
||||
set_x(e);
|
||||
}
|
||||
break;
|
||||
case OP_IMPLIES:
|
||||
VERIFY(m.is_implies(e, arg1, arg2));
|
||||
if (is_false(arg1) || is_true(arg2)) {
|
||||
set_true(e);
|
||||
} else if (arg1 == arg2) {
|
||||
set_true(e);
|
||||
} else if (is_true(arg1) && is_false(arg2)) {
|
||||
set_false(e);
|
||||
} else {
|
||||
SASSERT(is_x(arg1) || is_x(arg2));
|
||||
set_x(e);
|
||||
}
|
||||
break;
|
||||
case OP_IFF:
|
||||
VERIFY(m.is_iff(e, arg1, arg2));
|
||||
eval_eq(e, arg1, arg2);
|
||||
break;
|
||||
case OP_XOR:
|
||||
VERIFY(m.is_xor(e, arg1, arg2));
|
||||
eval_eq(e, arg1, arg2);
|
||||
if (is_false(e)) { set_true(e); }
|
||||
else if (is_true(e)) { set_false(e); }
|
||||
break;
|
||||
case OP_ITE:
|
||||
VERIFY(m.is_ite(e, argCond, argThen, argElse));
|
||||
if (is_true(argCond)) {
|
||||
inherit_value(e, argThen);
|
||||
} else if (is_false(argCond)) {
|
||||
inherit_value(e, argElse);
|
||||
} else if (argThen == argElse) {
|
||||
inherit_value(e, argThen);
|
||||
} else if (m.is_bool(e)) {
|
||||
SASSERT(is_x(argCond));
|
||||
if (is_x(argThen) || is_x(argElse)) {
|
||||
set_x(e);
|
||||
} else if (is_true(argThen) == is_true(argElse)) {
|
||||
inherit_value(e, argThen);
|
||||
} else {
|
||||
set_x(e);
|
||||
}
|
||||
} else {
|
||||
set_x(e);
|
||||
}
|
||||
break;
|
||||
case OP_TRUE:
|
||||
set_true(e);
|
||||
break;
|
||||
case OP_FALSE:
|
||||
set_false(e);
|
||||
break;
|
||||
case OP_EQ:
|
||||
VERIFY(m.is_eq(e, arg1, arg2));
|
||||
eval_eq(e, arg1, arg2);
|
||||
break;
|
||||
case OP_DISTINCT: {
|
||||
vector<rational> values;
|
||||
for (unsigned i = 0; i < arity; ++i) {
|
||||
expr* arg = e->get_arg(i);
|
||||
if (is_x(arg)) {
|
||||
set_x(e);
|
||||
return;
|
||||
}
|
||||
values.push_back(get_number(arg));
|
||||
}
|
||||
std::sort(values.begin(), values.end());
|
||||
for (unsigned i = 0; i + 1 < values.size(); ++i) {
|
||||
if (values[i] == values[i + 1]) {
|
||||
set_false(e);
|
||||
return;
|
||||
}
|
||||
}
|
||||
set_true(e);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
IF_VERBOSE(0, verbose_stream() << "Term not handled " << mk_pp(e, m) << "\n";);
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
void model_evaluator::eval_fmls(ptr_vector<expr> const& formulas)
|
||||
{
|
||||
ptr_vector<expr> todo(formulas);
|
||||
|
||||
while (!todo.empty()) {
|
||||
expr * curr_e = todo.back();
|
||||
|
||||
if (!is_app(curr_e)) {
|
||||
todo.pop_back();
|
||||
continue;
|
||||
}
|
||||
app * curr = to_app(curr_e);
|
||||
|
||||
if (!is_unknown(curr)) {
|
||||
todo.pop_back();
|
||||
continue;
|
||||
}
|
||||
unsigned arity = curr->get_num_args();
|
||||
for (unsigned i = 0; i < arity; ++i) {
|
||||
if (is_unknown(curr->get_arg(i))) {
|
||||
todo.push_back(curr->get_arg(i));
|
||||
}
|
||||
}
|
||||
if (todo.back() != curr) {
|
||||
continue;
|
||||
}
|
||||
todo.pop_back();
|
||||
if (curr->get_family_id() == m_arith.get_family_id()) {
|
||||
eval_arith(curr);
|
||||
} else if (curr->get_family_id() == m.get_basic_family_id()) {
|
||||
eval_basic(curr);
|
||||
} else {
|
||||
expr_ref vl(m);
|
||||
m_model->eval(curr, vl);
|
||||
assign_value(curr, vl);
|
||||
}
|
||||
|
||||
IF_VERBOSE(35, verbose_stream() << "assigned " << mk_pp(curr_e, m)
|
||||
<< (is_true(curr_e) ? "true" : is_false(curr_e) ? "false" : "unknown") << "\n";);
|
||||
SASSERT(!is_unknown(curr));
|
||||
}
|
||||
}
|
||||
|
||||
bool model_evaluator::check_model(ptr_vector<expr> const& formulas)
|
||||
{
|
||||
eval_fmls(formulas);
|
||||
bool has_x = false;
|
||||
for (unsigned i = 0; i < formulas.size(); ++i) {
|
||||
expr * form = formulas[i];
|
||||
SASSERT(!is_unknown(form));
|
||||
TRACE("spacer_verbose",
|
||||
tout << "formula is " << (is_true(form) ? "true" : is_false(form) ? "false" : "unknown") << "\n" << mk_pp(form, m) << "\n";);
|
||||
|
||||
if (is_false(form)) {
|
||||
IF_VERBOSE(0, verbose_stream() << "formula false in model: " << mk_pp(form, m) << "\n";);
|
||||
UNREACHABLE();
|
||||
}
|
||||
if (is_x(form)) {
|
||||
IF_VERBOSE(0, verbose_stream() << "formula undetermined in model: " << mk_pp(form, m) << "\n";);
|
||||
TRACE("old_spacer", model_smt2_pp(tout, m, *m_model, 0););
|
||||
has_x = true;
|
||||
}
|
||||
}
|
||||
return !has_x;
|
||||
}
|
||||
|
||||
expr_ref model_evaluator::eval_heavy(const model_ref& model, expr* fml)
|
||||
{
|
||||
expr_ref result(model->get_manager());
|
||||
|
||||
setup_model(model);
|
||||
ptr_vector<expr> fmls;
|
||||
fmls.push_back(fml);
|
||||
eval_fmls(fmls);
|
||||
if (is_false(fml)) {
|
||||
result = m.mk_false();
|
||||
} else if (is_true(fml) || is_x(fml)) {
|
||||
result = m.mk_true();
|
||||
} else if (m_arith.is_int_real(fml)) {
|
||||
result = m_arith.mk_numeral(get_number(fml), m_arith.is_int(fml));
|
||||
} else {
|
||||
result = get_value(fml);
|
||||
}
|
||||
reset();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref model_evaluator::eval(const model_ref& model, func_decl* d)
|
||||
{
|
||||
SASSERT(d->get_arity() == 0);
|
||||
expr_ref result(m);
|
||||
if (m_array.is_array(d->get_range())) {
|
||||
expr_ref e(m);
|
||||
e = m.mk_const(d);
|
||||
result = eval(model, e);
|
||||
} else {
|
||||
result = model->get_const_interp(d);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref model_evaluator::eval(const model_ref& model, expr* e)
|
||||
{
|
||||
expr_ref result(m);
|
||||
m_model = model.get();
|
||||
VERIFY(m_model->eval(e, result, true));
|
||||
if (m_array.is_array(e)) {
|
||||
vector<expr_ref_vector> stores;
|
||||
expr_ref_vector args(m);
|
||||
expr_ref else_case(m);
|
||||
if (extract_array_func_interp(result, stores, else_case)) {
|
||||
result = m_array.mk_const_array(m.get_sort(e), else_case);
|
||||
while (!stores.empty() && stores.back().back() == else_case) {
|
||||
stores.pop_back();
|
||||
}
|
||||
for (unsigned i = stores.size(); i > 0;) {
|
||||
--i;
|
||||
args.resize(1);
|
||||
args[0] = result;
|
||||
args.append(stores[i]);
|
||||
result = m_array.mk_store(args.size(), args.c_ptr());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
}
|
117
src/muz/spacer/spacer_legacy_mev.h
Normal file
@ -0,0 +1,117 @@
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Deprecated implementation of model evaluator. To be removed.
|
||||
*/
|
||||
#ifndef OLD_MEV_H
|
||||
#define OLD_MEV_H
|
||||
|
||||
#include "ast.h"
|
||||
#include "ast_pp.h"
|
||||
#include "obj_hashtable.h"
|
||||
#include "ref_vector.h"
|
||||
#include "simplifier.h"
|
||||
#include "trace.h"
|
||||
#include "vector.h"
|
||||
#include "arith_decl_plugin.h"
|
||||
#include "array_decl_plugin.h"
|
||||
#include "bv_decl_plugin.h"
|
||||
|
||||
namespace old {
|
||||
class model_evaluator {
|
||||
ast_manager& m;
|
||||
arith_util m_arith;
|
||||
array_util m_array;
|
||||
obj_map<expr, rational> m_numbers;
|
||||
expr_ref_vector m_refs;
|
||||
obj_map<expr, expr*> m_values;
|
||||
model_ref m_model;
|
||||
|
||||
//00 -- non-visited
|
||||
//01 -- X
|
||||
//10 -- false
|
||||
//11 -- true
|
||||
expr_mark m1;
|
||||
expr_mark m2;
|
||||
|
||||
/// used by collect()
|
||||
expr_mark m_visited;
|
||||
|
||||
|
||||
|
||||
void reset();
|
||||
|
||||
/// caches the values of all constants in the given model
|
||||
void setup_model(const model_ref& model);
|
||||
/// caches the value of an expression
|
||||
void assign_value(expr* e, expr* v);
|
||||
|
||||
/// extracts an implicant of the conjunction of formulas
|
||||
void collect(ptr_vector<expr> const& formulas, ptr_vector<expr>& tocollect);
|
||||
|
||||
/// one-round of extracting an implicant of e. The implicant
|
||||
/// literals are stored in tocollect. The worklist is stored in todo
|
||||
void process_formula(app* e, ptr_vector<expr>& todo, ptr_vector<expr>& tocollect);
|
||||
void eval_arith(app* e);
|
||||
void eval_basic(app* e);
|
||||
void eval_eq(app* e, expr* arg1, expr* arg2);
|
||||
void eval_array_eq(app* e, expr* arg1, expr* arg2);
|
||||
void inherit_value(expr* e, expr* v);
|
||||
|
||||
bool is_unknown(expr* x) { return !m1.is_marked(x) && !m2.is_marked(x); }
|
||||
void set_unknown(expr* x) { m1.mark(x, false); m2.mark(x, false); }
|
||||
bool is_x(expr* x) { return !m1.is_marked(x) && m2.is_marked(x); }
|
||||
bool is_false(expr* x) { return m1.is_marked(x) && !m2.is_marked(x); }
|
||||
bool is_true(expr* x) { return m1.is_marked(x) && m2.is_marked(x); }
|
||||
void set_x(expr* x) { SASSERT(is_unknown(x)); m2.mark(x); }
|
||||
void set_v(expr* x) { SASSERT(is_unknown(x)); m1.mark(x); }
|
||||
void set_false(expr* x) { SASSERT(is_unknown(x)); m1.mark(x); }
|
||||
void set_true(expr* x) { SASSERT(is_unknown(x)); m1.mark(x); m2.mark(x); }
|
||||
void set_bool(expr* x, bool v) { if(v) { set_true(x); } else { set_false(x); } }
|
||||
rational const& get_number(expr* x) const { return m_numbers.find(x); }
|
||||
void set_number(expr* x, rational const& v)
|
||||
{
|
||||
set_v(x);
|
||||
m_numbers.insert(x, v);
|
||||
TRACE("spacer_verbose", tout << mk_pp(x, m) << " " << v << "\n";);
|
||||
}
|
||||
expr* get_value(expr* x) { return m_values.find(x); }
|
||||
void set_value(expr* x, expr* v)
|
||||
{ set_v(x); m_refs.push_back(v); m_values.insert(x, v); }
|
||||
|
||||
|
||||
/// evaluates all sub-formulas and terms of the input in the current model.
|
||||
/// Caches the result
|
||||
void eval_fmls(ptr_vector<expr> const & formulas);
|
||||
|
||||
/// calls eval_fmls(). Then checks whether all formulas are
|
||||
/// TRUE. Returns false if at least one formula is unknown (X)
|
||||
bool check_model(ptr_vector<expr> const & formulas);
|
||||
|
||||
bool extract_array_func_interp(expr* a, vector<expr_ref_vector>& stores,
|
||||
expr_ref& else_case);
|
||||
|
||||
void eval_exprs(expr_ref_vector& es);
|
||||
|
||||
public:
|
||||
model_evaluator(ast_manager& m) : m(m), m_arith(m), m_array(m), m_refs(m) {}
|
||||
|
||||
|
||||
/**
|
||||
\brief extract literals from formulas that satisfy formulas.
|
||||
|
||||
\pre model satisfies formulas
|
||||
*/
|
||||
void minimize_literals(ptr_vector<expr> const & formulas, const model_ref& mdl,
|
||||
expr_ref_vector& result);
|
||||
|
||||
expr_ref eval_heavy(const model_ref& mdl, expr* fml);
|
||||
|
||||
expr_ref eval(const model_ref& mdl, expr* e);
|
||||
expr_ref eval(const model_ref& mdl, func_decl* d);
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
|
||||
#endif /* OLD_MEV_H */
|
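A small usage sketch for the deprecated evaluator declared above; the driver function and the assumption that mdl satisfies every literal in lits are hypothetical.

// minimize_example is an illustrative caller; mdl and lits are assumed to
// come from an SMT query elsewhere, with mdl a model of all of lits.
void minimize_example(ast_manager &m, const model_ref &mdl,
                      ptr_vector<expr> const &lits, expr_ref_vector &core) {
    old::model_evaluator mev(m);
    // keep only the literals needed for mdl to satisfy the conjunction of lits
    mev.minimize_literals(lits, mdl, core);
}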
386
src/muz/spacer/spacer_manager.cpp
Normal file
@ -0,0 +1,386 @@
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_manager.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
A manager class for SPACER, taking care of creating AST
|
||||
objects and conversions between them.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-8-25.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include <sstream>
|
||||
#include "spacer_manager.h"
|
||||
#include "ast_smt2_pp.h"
|
||||
#include "for_each_expr.h"
|
||||
#include "has_free_vars.h"
|
||||
#include "expr_replacer.h"
|
||||
#include "expr_abstract.h"
|
||||
#include "model2expr.h"
|
||||
#include "model_smt2_pp.h"
|
||||
#include "model_converter.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
class collect_decls_proc {
|
||||
func_decl_set& m_bound_decls;
|
||||
func_decl_set& m_aux_decls;
|
||||
public:
|
||||
collect_decls_proc(func_decl_set& bound_decls, func_decl_set& aux_decls):
|
||||
m_bound_decls(bound_decls),
|
||||
m_aux_decls(aux_decls)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(app* a)
|
||||
{
|
||||
if (a->get_family_id() == null_family_id) {
|
||||
func_decl* f = a->get_decl();
|
||||
if (!m_bound_decls.contains(f)) {
|
||||
m_aux_decls.insert(f);
|
||||
}
|
||||
}
|
||||
}
|
||||
void operator()(var* v) {}
|
||||
void operator()(quantifier* q) {}
|
||||
};
|
||||
|
||||
typedef hashtable<symbol, symbol_hash_proc, symbol_eq_proc> symbol_set;
|
||||
|
||||
expr_ref inductive_property::fixup_clause(expr* fml) const
|
||||
{
|
||||
expr_ref_vector disjs(m);
|
||||
flatten_or(fml, disjs);
|
||||
expr_ref result(m);
|
||||
bool_rewriter(m).mk_or(disjs.size(), disjs.c_ptr(), result);
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref inductive_property::fixup_clauses(expr* fml) const
|
||||
{
|
||||
expr_ref_vector conjs(m);
|
||||
expr_ref result(m);
|
||||
flatten_and(fml, conjs);
|
||||
for (unsigned i = 0; i < conjs.size(); ++i) {
|
||||
conjs[i] = fixup_clause(conjs[i].get());
|
||||
}
|
||||
bool_rewriter(m).mk_and(conjs.size(), conjs.c_ptr(), result);
|
||||
return result;
|
||||
}
|
||||
|
||||
std::string inductive_property::to_string() const
|
||||
{
|
||||
std::stringstream stm;
|
||||
model_ref md;
|
||||
expr_ref result(m);
|
||||
to_model(md);
|
||||
model_smt2_pp(stm, m, *md.get(), 0);
|
||||
return stm.str();
|
||||
}
|
||||
|
||||
void inductive_property::to_model(model_ref& md) const
|
||||
{
|
||||
md = alloc(model, m);
|
||||
vector<relation_info> const& rs = m_relation_info;
|
||||
expr_ref_vector conjs(m);
|
||||
for (unsigned i = 0; i < rs.size(); ++i) {
|
||||
relation_info ri(rs[i]);
|
||||
func_decl * pred = ri.m_pred;
|
||||
expr_ref prop = fixup_clauses(ri.m_body);
|
||||
func_decl_ref_vector const& sig = ri.m_vars;
|
||||
expr_ref q(m);
|
||||
expr_ref_vector sig_vars(m);
|
||||
for (unsigned j = 0; j < sig.size(); ++j) {
|
||||
sig_vars.push_back(m.mk_const(sig[sig.size() - j - 1]));
|
||||
}
|
||||
expr_abstract(m, 0, sig_vars.size(), sig_vars.c_ptr(), prop, q);
|
||||
if (sig.empty()) {
|
||||
md->register_decl(pred, q);
|
||||
} else {
|
||||
func_interp* fi = alloc(func_interp, m, sig.size());
|
||||
fi->set_else(q);
|
||||
md->register_decl(pred, fi);
|
||||
}
|
||||
}
|
||||
TRACE("spacer", model_smt2_pp(tout, m, *md, 0););
|
||||
apply(const_cast<model_converter_ref&>(m_mc), md, 0);
|
||||
}
|
||||
|
||||
expr_ref inductive_property::to_expr() const
|
||||
{
|
||||
model_ref md;
|
||||
expr_ref result(m);
|
||||
to_model(md);
|
||||
model2expr(md, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
void inductive_property::display(datalog::rule_manager& rm, ptr_vector<datalog::rule> const& rules, std::ostream& out) const
|
||||
{
|
||||
func_decl_set bound_decls, aux_decls;
|
||||
collect_decls_proc collect_decls(bound_decls, aux_decls);
|
||||
|
||||
for (unsigned i = 0; i < m_relation_info.size(); ++i) {
|
||||
bound_decls.insert(m_relation_info[i].m_pred);
|
||||
func_decl_ref_vector const& sig = m_relation_info[i].m_vars;
|
||||
for (unsigned j = 0; j < sig.size(); ++j) {
|
||||
bound_decls.insert(sig[j]);
|
||||
}
|
||||
for_each_expr(collect_decls, m_relation_info[i].m_body);
|
||||
}
|
||||
for (unsigned i = 0; i < rules.size(); ++i) {
|
||||
bound_decls.insert(rules[i]->get_decl());
|
||||
}
|
||||
for (unsigned i = 0; i < rules.size(); ++i) {
|
||||
unsigned u_sz = rules[i]->get_uninterpreted_tail_size();
|
||||
unsigned t_sz = rules[i]->get_tail_size();
|
||||
for (unsigned j = u_sz; j < t_sz; ++j) {
|
||||
for_each_expr(collect_decls, rules[i]->get_tail(j));
|
||||
}
|
||||
}
|
||||
smt2_pp_environment_dbg env(m);
|
||||
func_decl_set::iterator it = aux_decls.begin(), end = aux_decls.end();
|
||||
for (; it != end; ++it) {
|
||||
func_decl* f = *it;
|
||||
ast_smt2_pp(out, f, env);
|
||||
out << "\n";
|
||||
}
|
||||
|
||||
out << to_string() << "\n";
|
||||
for (unsigned i = 0; i < rules.size(); ++i) {
|
||||
out << "(push)\n";
|
||||
out << "(assert (not\n";
|
||||
rm.display_smt2(*rules[i], out);
|
||||
out << "))\n";
|
||||
out << "(check-sat)\n";
|
||||
out << "(pop)\n";
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<std::string> manager::get_state_suffixes()
|
||||
{
|
||||
std::vector<std::string> res;
|
||||
res.push_back("_n");
|
||||
return res;
|
||||
}
|
||||
|
||||
manager::manager(unsigned max_num_contexts, ast_manager& manager) :
|
||||
m(manager),
|
||||
m_brwr(m),
|
||||
m_mux(m, get_state_suffixes()),
|
||||
m_background(m.mk_true(), m),
|
||||
m_contexts(m, max_num_contexts),
|
||||
m_contexts2(m, max_num_contexts),
|
||||
m_contexts3(m, max_num_contexts),
|
||||
m_next_unique_num(0)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void manager::add_new_state(func_decl * s)
|
||||
{
|
||||
SASSERT(s->get_arity() == 0); //we currently don't support non-constant states
|
||||
decl_vector vect;
|
||||
|
||||
SASSERT(o_index(0) == 1); //we assume this in the number of retrieved symbols
|
||||
m_mux.create_tuple(s, s->get_arity(), s->get_domain(), s->get_range(), 2, vect);
|
||||
m_o0_preds.push_back(vect[o_index(0)]);
|
||||
}
|
||||
|
||||
func_decl * manager::get_o_pred(func_decl* s, unsigned idx)
|
||||
{
|
||||
func_decl * res = m_mux.try_get_by_prefix(s, o_index(idx));
|
||||
if (res) { return res; }
|
||||
add_new_state(s);
|
||||
res = m_mux.try_get_by_prefix(s, o_index(idx));
|
||||
SASSERT(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
func_decl * manager::get_n_pred(func_decl* s)
|
||||
{
|
||||
func_decl * res = m_mux.try_get_by_prefix(s, n_index());
|
||||
if (res) { return res; }
|
||||
add_new_state(s);
|
||||
res = m_mux.try_get_by_prefix(s, n_index());
|
||||
SASSERT(res);
|
||||
return res;
|
||||
}
|
||||
|
||||
void manager::mk_model_into_cube(const expr_ref_vector & mdl, expr_ref & res)
|
||||
{
|
||||
m_brwr.mk_and(mdl.size(), mdl.c_ptr(), res);
|
||||
}
|
||||
|
||||
void manager::mk_core_into_cube(const expr_ref_vector & core, expr_ref & res)
|
||||
{
|
||||
m_brwr.mk_and(core.size(), core.c_ptr(), res);
|
||||
}
|
||||
|
||||
void manager::mk_cube_into_lemma(expr * cube, expr_ref & res)
|
||||
{
|
||||
m_brwr.mk_not(cube, res);
|
||||
}
|
||||
|
||||
void manager::mk_lemma_into_cube(expr * lemma, expr_ref & res)
|
||||
{
|
||||
m_brwr.mk_not(lemma, res);
|
||||
}
|
||||
|
||||
expr_ref manager::mk_and(unsigned sz, expr* const* exprs)
|
||||
{
|
||||
expr_ref result(m);
|
||||
m_brwr.mk_and(sz, exprs, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref manager::mk_or(unsigned sz, expr* const* exprs)
|
||||
{
|
||||
expr_ref result(m);
|
||||
m_brwr.mk_or(sz, exprs, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref manager::mk_not_and(expr_ref_vector const& conjs)
|
||||
{
|
||||
expr_ref result(m), e(m);
|
||||
expr_ref_vector es(conjs);
|
||||
flatten_and(es);
|
||||
for (unsigned i = 0; i < es.size(); ++i) {
|
||||
m_brwr.mk_not(es[i].get(), e);
|
||||
es[i] = e;
|
||||
}
|
||||
m_brwr.mk_or(es.size(), es.c_ptr(), result);
|
||||
return result;
|
||||
}
|
||||
|
||||
void manager::get_or(expr* e, expr_ref_vector& result)
|
||||
{
|
||||
result.push_back(e);
|
||||
for (unsigned i = 0; i < result.size();) {
|
||||
e = result[i].get();
|
||||
if (m.is_or(e)) {
|
||||
result.append(to_app(e)->get_num_args(), to_app(e)->get_args());
|
||||
result[i] = result.back();
|
||||
result.pop_back();
|
||||
} else {
|
||||
++i;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool manager::try_get_state_and_value_from_atom(expr * atom0, app *& state, app_ref& value)
|
||||
{
|
||||
if (!is_app(atom0)) {
|
||||
return false;
|
||||
}
|
||||
app * atom = to_app(atom0);
|
||||
expr * arg1;
|
||||
expr * arg2;
|
||||
app * candidate_state;
|
||||
app_ref candidate_value(m);
|
||||
if (m.is_not(atom, arg1)) {
|
||||
if (!is_app(arg1)) {
|
||||
return false;
|
||||
}
|
||||
candidate_state = to_app(arg1);
|
||||
candidate_value = m.mk_false();
|
||||
} else if (m.is_eq(atom, arg1, arg2)) {
|
||||
if (!is_app(arg1) || !is_app(arg2)) {
|
||||
return false;
|
||||
}
|
||||
if (!m_mux.is_muxed(to_app(arg1)->get_decl())) {
|
||||
std::swap(arg1, arg2);
|
||||
}
|
||||
candidate_state = to_app(arg1);
|
||||
candidate_value = to_app(arg2);
|
||||
} else {
|
||||
candidate_state = atom;
|
||||
candidate_value = m.mk_true();
|
||||
}
|
||||
if (!m_mux.is_muxed(candidate_state->get_decl())) {
|
||||
return false;
|
||||
}
|
||||
state = candidate_state;
|
||||
value = candidate_value;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool manager::try_get_state_decl_from_atom(expr * atom, func_decl *& state)
|
||||
{
|
||||
app_ref dummy_value_holder(m);
|
||||
app * s;
|
||||
if (try_get_state_and_value_from_atom(atom, s, dummy_value_holder)) {
|
||||
state = s->get_decl();
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new skolem constant
|
||||
*/
|
||||
app* mk_zk_const(ast_manager &m, unsigned idx, sort *s) {
|
||||
std::stringstream name;
|
||||
name << "sk!" << idx;
|
||||
return m.mk_const(symbol(name.str().c_str()), s);
|
||||
}
|
||||
|
||||
namespace find_zk_const_ns {
|
||||
struct proc {
|
||||
app_ref_vector &m_out;
|
||||
proc (app_ref_vector &out) : m_out(out) {}
|
||||
void operator() (var const * n) const {}
|
||||
void operator() (app *n) const {
|
||||
if (is_uninterp_const(n) &&
|
||||
n->get_decl()->get_name().str().compare (0, 3, "sk!") == 0) {
|
||||
m_out.push_back (n);
|
||||
}
|
||||
}
|
||||
void operator() (quantifier const *n) const {}
|
||||
};
|
||||
}
|
||||
|
||||
void find_zk_const(expr *e, app_ref_vector &res) {
|
||||
find_zk_const_ns::proc p(res);
|
||||
for_each_expr (p, e);
|
||||
}
|
||||
|
||||
namespace has_zk_const_ns {
|
||||
struct found {};
|
||||
struct proc {
|
||||
void operator() (var const *n) const {}
|
||||
void operator() (app const *n) const {
|
||||
if (is_uninterp_const(n) &&
|
||||
n->get_decl()->get_name().str().compare(0, 3, "sk!") == 0) {
|
||||
throw found();
|
||||
}
|
||||
}
|
||||
void operator() (quantifier const *n) const {}
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
bool has_zk_const(expr *e){
|
||||
has_zk_const_ns::proc p;
|
||||
try {
|
||||
for_each_expr(p, e);
|
||||
}
|
||||
catch (has_zk_const_ns::found) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
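The skolem helpers above key on the "sk!&lt;idx&gt;" naming prefix; the sketch below is hypothetical and only exercises that convention on an expression e owned by the caller.

// zk_const_example is illustrative; e is any expression built elsewhere.
void zk_const_example(ast_manager &m, expr *e) {
    arith_util a(m);
    app *sk = spacer::mk_zk_const(m, 7, a.mk_int()); // the integer constant named sk!7
    (void) sk;
    app_ref_vector skolems(m);
    spacer::find_zk_const(e, skolems);               // collects every sk!<i> occurring in e
    if (spacer::has_zk_const(e)) {
        TRACE("spacer", tout << skolems.size() << " skolem constants\n";);
    }
}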
345
src/muz/spacer/spacer_manager.h
Normal file
@ -0,0 +1,345 @@
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_manager.h
|
||||
|
||||
Abstract:
|
||||
|
||||
A manager class for SPACER, taking care of creating AST
|
||||
objects and conversions between them.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-8-25.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_MANAGER_H_
|
||||
#define _SPACER_MANAGER_H_
|
||||
|
||||
#include <utility>
|
||||
#include <map>
|
||||
#include "bool_rewriter.h"
|
||||
#include "expr_replacer.h"
|
||||
#include "expr_substitution.h"
|
||||
#include "map.h"
|
||||
#include "ref_vector.h"
|
||||
#include "smt_kernel.h"
|
||||
#include "spacer_util.h"
|
||||
#include "spacer_sym_mux.h"
|
||||
#include "spacer_farkas_learner.h"
|
||||
#include "spacer_smt_context_manager.h"
|
||||
#include "dl_rule.h"
|
||||
#include <vector>
|
||||
|
||||
namespace smt {
|
||||
class context;
|
||||
}
|
||||
|
||||
namespace spacer {
|
||||
|
||||
struct relation_info {
|
||||
func_decl_ref m_pred;
|
||||
func_decl_ref_vector m_vars;
|
||||
expr_ref m_body;
|
||||
relation_info(ast_manager& m, func_decl* pred, ptr_vector<func_decl> const& vars, expr* b):
|
||||
m_pred(pred, m), m_vars(m, vars.size(), vars.c_ptr()), m_body(b, m) {}
|
||||
relation_info(relation_info const& other): m_pred(other.m_pred), m_vars(other.m_vars), m_body(other.m_body) {}
|
||||
};
|
||||
|
||||
class unknown_exception {};
|
||||
|
||||
class inductive_property {
|
||||
ast_manager& m;
|
||||
model_converter_ref m_mc;
|
||||
vector<relation_info> m_relation_info;
|
||||
expr_ref fixup_clauses(expr* property) const;
|
||||
expr_ref fixup_clause(expr* clause) const;
|
||||
public:
|
||||
inductive_property(ast_manager& m, model_converter_ref& mc, vector<relation_info> const& relations):
|
||||
m(m),
|
||||
m_mc(mc),
|
||||
m_relation_info(relations) {}
|
||||
|
||||
std::string to_string() const;
|
||||
|
||||
expr_ref to_expr() const;
|
||||
|
||||
void to_model(model_ref& md) const;
|
||||
|
||||
void display(datalog::rule_manager& rm, ptr_vector<datalog::rule> const& rules, std::ostream& out) const;
|
||||
};
|
||||
|
||||
class manager {
|
||||
ast_manager& m;
|
||||
|
||||
mutable bool_rewriter m_brwr;
|
||||
|
||||
sym_mux m_mux;
|
||||
expr_ref m_background;
|
||||
decl_vector m_o0_preds;
|
||||
spacer::smt_context_manager m_contexts;
|
||||
spacer::smt_context_manager m_contexts2;
|
||||
spacer::smt_context_manager m_contexts3;
|
||||
|
||||
/** whenever we need a unique number, we get this one and increase */
|
||||
unsigned m_next_unique_num;
|
||||
|
||||
|
||||
static std::vector<std::string> get_state_suffixes();
|
||||
|
||||
unsigned n_index() const { return 0; }
|
||||
unsigned o_index(unsigned i) const { return i + 1; }
|
||||
|
||||
void add_new_state(func_decl * s);
|
||||
|
||||
public:
|
||||
manager(unsigned max_num_contexts, ast_manager & manager);
|
||||
|
||||
ast_manager& get_manager() const { return m; }
|
||||
bool_rewriter& get_brwr() const { return m_brwr; }
|
||||
|
||||
expr_ref mk_and(unsigned sz, expr* const* exprs);
|
||||
expr_ref mk_and(expr_ref_vector const& exprs)
|
||||
{
|
||||
return mk_and(exprs.size(), exprs.c_ptr());
|
||||
}
|
||||
expr_ref mk_and(expr* a, expr* b)
|
||||
{
|
||||
expr* args[2] = { a, b };
|
||||
return mk_and(2, args);
|
||||
}
|
||||
expr_ref mk_or(unsigned sz, expr* const* exprs);
|
||||
expr_ref mk_or(expr_ref_vector const& exprs)
|
||||
{
|
||||
return mk_or(exprs.size(), exprs.c_ptr());
|
||||
}
|
||||
|
||||
expr_ref mk_not_and(expr_ref_vector const& exprs);
|
||||
|
||||
void get_or(expr* e, expr_ref_vector& result);
|
||||
|
||||
//"o" predicates stand for the old states and "n" for the new states
|
||||
func_decl * get_o_pred(func_decl * s, unsigned idx);
|
||||
func_decl * get_n_pred(func_decl * s);
|
||||
|
||||
/**
|
||||
Marks symbol as non-model which means it will not appear in models collected by
|
||||
get_state_cube_from_model function.
|
||||
This is to take care of auxiliary symbols introduced by the disjunction relations
|
||||
to relativize lemmas coming from disjuncts.
|
||||
*/
|
||||
void mark_as_non_model(func_decl * p)
|
||||
{
|
||||
m_mux.mark_as_non_model(p);
|
||||
}
|
||||
|
||||
|
||||
func_decl * const * begin_o0_preds() const { return m_o0_preds.begin(); }
|
||||
func_decl * const * end_o0_preds() const { return m_o0_preds.end(); }
|
||||
|
||||
bool is_state_pred(func_decl * p) const { return m_mux.is_muxed(p); }
|
||||
func_decl * to_o0(func_decl * p) { return m_mux.conv(m_mux.get_primary(p), 0, o_index(0)); }
|
||||
|
||||
bool is_o(func_decl * p, unsigned idx) const
|
||||
{
|
||||
return m_mux.has_index(p, o_index(idx));
|
||||
}
|
||||
void get_o_index(func_decl* p, unsigned& idx) const
|
||||
{
|
||||
m_mux.try_get_index(p, idx);
|
||||
SASSERT(idx != n_index());
|
||||
idx--; // m_mux has indices starting at 1
|
||||
}
|
||||
bool is_o(expr* e, unsigned idx) const
|
||||
{
|
||||
return is_app(e) && is_o(to_app(e)->get_decl(), idx);
|
||||
}
|
||||
bool is_o(func_decl * p) const
|
||||
{
|
||||
unsigned idx;
|
||||
return m_mux.try_get_index(p, idx) && idx != n_index();
|
||||
}
|
||||
bool is_o(expr* e) const
|
||||
{
|
||||
return is_app(e) && is_o(to_app(e)->get_decl());
|
||||
}
|
||||
bool is_n(func_decl * p) const
|
||||
{
|
||||
return m_mux.has_index(p, n_index());
|
||||
}
|
||||
bool is_n(expr* e) const
|
||||
{
|
||||
return is_app(e) && is_n(to_app(e)->get_decl());
|
||||
}
|
||||
|
||||
/** true if p should not appear in models propagated into child relations */
|
||||
bool is_non_model_sym(func_decl * p) const
|
||||
{ return m_mux.is_non_model_sym(p); }
|
||||
|
||||
|
||||
/** true if f doesn't contain any n predicates */
|
||||
bool is_o_formula(expr * f) const
|
||||
{
|
||||
return !m_mux.contains(f, n_index());
|
||||
}
|
||||
|
||||
/** true if f contains only o state preds of index o_idx */
|
||||
bool is_o_formula(expr * f, unsigned o_idx) const
|
||||
{
|
||||
return m_mux.is_homogenous_formula(f, o_index(o_idx));
|
||||
}
|
||||
/** true if f doesn't contain any o predicates */
|
||||
bool is_n_formula(expr * f) const
|
||||
{
|
||||
return m_mux.is_homogenous_formula(f, n_index());
|
||||
}
|
||||
|
||||
func_decl * o2n(func_decl * p, unsigned o_idx) const
|
||||
{
|
||||
return m_mux.conv(p, o_index(o_idx), n_index());
|
||||
}
|
||||
func_decl * o2o(func_decl * p, unsigned src_idx, unsigned tgt_idx) const
|
||||
{
|
||||
return m_mux.conv(p, o_index(src_idx), o_index(tgt_idx));
|
||||
}
|
||||
func_decl * n2o(func_decl * p, unsigned o_idx) const
|
||||
{
|
||||
return m_mux.conv(p, n_index(), o_index(o_idx));
|
||||
}
|
||||
|
||||
void formula_o2n(expr * f, expr_ref & result, unsigned o_idx, bool homogenous = true) const
|
||||
{ m_mux.conv_formula(f, o_index(o_idx), n_index(), result, homogenous); }
|
||||
|
||||
void formula_n2o(expr * f, expr_ref & result, unsigned o_idx, bool homogenous = true) const
|
||||
{ m_mux.conv_formula(f, n_index(), o_index(o_idx), result, homogenous); }
|
||||
|
||||
void formula_n2o(unsigned o_idx, bool homogenous, expr_ref & result) const
|
||||
{ m_mux.conv_formula(result.get(), n_index(), o_index(o_idx), result, homogenous); }
|
||||
|
||||
void formula_o2o(expr * src, expr_ref & tgt, unsigned src_idx, unsigned tgt_idx, bool homogenous = true) const
|
||||
{ m_mux.conv_formula(src, o_index(src_idx), o_index(tgt_idx), tgt, homogenous); }
|
||||
|
||||
/**
|
||||
Return true if all state symbols which e contains are of one kind (either "n" or one of "o").
|
||||
*/
|
||||
bool is_homogenous_formula(expr * e) const
|
||||
{
|
||||
return m_mux.is_homogenous_formula(e);
|
||||
}
|
||||
|
||||
/**
|
||||
Collect indices used in expression.
|
||||
*/
|
||||
void collect_indices(expr* e, unsigned_vector& indices) const
|
||||
{
|
||||
m_mux.collect_indices(e, indices);
|
||||
}
|
||||
|
||||
/**
|
||||
Collect used variables of each index.
|
||||
*/
|
||||
void collect_variables(expr* e, vector<ptr_vector<app> >& vars) const
|
||||
{
|
||||
m_mux.collect_variables(e, vars);
|
||||
}
|
||||
|
||||
/**
|
||||
Return true iff both s1 and s2 are either "n" or "o" of the same index.
|
||||
If one (or both) of them are not state symbol, return false.
|
||||
*/
|
||||
bool have_different_state_kinds(func_decl * s1, func_decl * s2) const
|
||||
{
|
||||
unsigned i1, i2;
|
||||
return m_mux.try_get_index(s1, i1) && m_mux.try_get_index(s2, i2) && i1 != i2;
|
||||
}
|
||||
|
||||
/**
|
||||
Increase indexes of state symbols in formula by dist.
|
||||
The 'N' index becomes 'O' index with number dist-1.
|
||||
*/
|
||||
void formula_shift(expr * src, expr_ref & tgt, unsigned dist) const
|
||||
{
|
||||
SASSERT(n_index() == 0);
|
||||
SASSERT(o_index(0) == 1);
|
||||
m_mux.shift_formula(src, dist, tgt);
|
||||
}
|
||||
|
||||
void mk_model_into_cube(const expr_ref_vector & mdl, expr_ref & res);
|
||||
void mk_core_into_cube(const expr_ref_vector & core, expr_ref & res);
|
||||
void mk_cube_into_lemma(expr * cube, expr_ref & res);
|
||||
void mk_lemma_into_cube(expr * lemma, expr_ref & res);
|
||||
|
||||
/**
|
||||
Remove from vec all atoms that do not have an "o" state.
|
||||
The order of elements in vec may change.
|
||||
An assumption is that atoms having "o" state of given index
|
||||
do not have "o" states of other indexes or "n" states.
|
||||
*/
|
||||
void filter_o_atoms(expr_ref_vector& vec, unsigned o_idx) const
|
||||
{ m_mux.filter_idx(vec, o_index(o_idx)); }
|
||||
void filter_n_atoms(expr_ref_vector& vec) const
|
||||
{ m_mux.filter_idx(vec, n_index()); }
|
||||
|
||||
/**
|
||||
Partition literals into o_lits and others.
|
||||
*/
|
||||
void partition_o_atoms(expr_ref_vector const& lits,
|
||||
expr_ref_vector& o_lits,
|
||||
expr_ref_vector& other,
|
||||
unsigned o_idx) const
|
||||
{
|
||||
m_mux.partition_o_idx(lits, o_lits, other, o_index(o_idx));
|
||||
}
|
||||
|
||||
void filter_out_non_model_atoms(expr_ref_vector& vec) const
|
||||
{ m_mux.filter_non_model_lits(vec); }
|
||||
|
||||
bool try_get_state_and_value_from_atom(expr * atom, app *& state, app_ref& value);
|
||||
bool try_get_state_decl_from_atom(expr * atom, func_decl *& state);
|
||||
|
||||
|
||||
std::string pp_model(const model_core & mdl) const
|
||||
{ return m_mux.pp_model(mdl); }
|
||||
|
||||
|
||||
void set_background(expr* b) { m_background = b; }
|
||||
|
||||
expr* get_background() const { return m_background; }
|
||||
|
||||
unsigned get_unique_num() { return m_next_unique_num++; }
|
||||
|
||||
solver* mk_fresh() {return m_contexts.mk_fresh();}
|
||||
smt_params& fparams() { return m_contexts.fparams(); }
|
||||
solver* mk_fresh2() {return m_contexts2.mk_fresh();}
|
||||
smt_params &fparams2() { return m_contexts2.fparams(); }
|
||||
solver* mk_fresh3() {return m_contexts3.mk_fresh();}
|
||||
smt_params &fparams3() {return m_contexts3.fparams();}
|
||||
|
||||
|
||||
|
||||
void collect_statistics(statistics& st) const
|
||||
{
|
||||
m_contexts.collect_statistics(st);
|
||||
m_contexts2.collect_statistics(st);
|
||||
m_contexts3.collect_statistics(st);
|
||||
}
|
||||
|
||||
void reset_statistics()
|
||||
{
|
||||
m_contexts.reset_statistics();
|
||||
m_contexts2.reset_statistics();
|
||||
m_contexts3.reset_statistics();
|
||||
}
|
||||
};
|
||||
|
||||
app* mk_zk_const (ast_manager &m, unsigned idx, sort *s);
|
||||
void find_zk_const(expr* e, app_ref_vector &out);
|
||||
bool has_zk_const(expr* e);
|
||||
}
|
||||
|
||||
#endif
|
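A sketch of the o/n renaming interface above, assuming a manager pm and a registered state predicate p supplied by a hypothetical caller.

// version_example is illustrative; n_fml is an "n"-formula over next-state symbols.
void version_example(spacer::manager &pm, func_decl *p, expr *n_fml) {
    ast_manager &m = pm.get_manager();
    func_decl *p_n = pm.get_n_pred(p);    // next-state ("n") version of p
    func_decl *p_o = pm.get_o_pred(p, 0); // old-state ("o") version with index 0
    (void) p_n; (void) p_o;
    expr_ref o_fml(m);
    // rename an n-formula into the corresponding o-formula of index 0
    pm.formula_n2o(n_fml, o_fml, 0);
    TRACE("spacer", tout << mk_pp(o_fml, m) << "\n";);
}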
55
src/muz/spacer/spacer_marshal.cpp
Normal file
@ -0,0 +1,55 @@
/*++
Copyright (c) 2017 Arie Gurfinkel

Module Name:

    spacer_marshal.cpp

Abstract:

    marshaling and unmarshaling of expressions

--*/
#include "spacer_marshal.h"

#include <sstream>
#include "cmd_context.h"
#include "smt2parser.h"
#include "vector.h"
#include "ast_smt_pp.h"
#include "ast_pp.h"

namespace spacer {
std::ostream &marshal(std::ostream &os, expr_ref e, ast_manager &m)
{
    ast_smt_pp pp(m);
    pp.display_smt2(os, e);
    return os;
}

std::string marshal(expr_ref e, ast_manager &m)
{
    std::stringstream ss;
    marshal(ss, e, m);
    return ss.str();
}


expr_ref unmarshal(std::istream &is, ast_manager &m)
{
    cmd_context ctx(false, &m);
    ctx.set_ignore_check(true);
    if (!parse_smt2_commands(ctx, is)) { return expr_ref(0, m); }

    ptr_vector<expr>::const_iterator it = ctx.begin_assertions();
    ptr_vector<expr>::const_iterator end = ctx.end_assertions();
    if (it == end) { return expr_ref(m.mk_true(), m); }
    unsigned size = static_cast<unsigned>(end - it);
    return expr_ref(m.mk_and(size, it), m);
}

expr_ref unmarshal(std::string s, ast_manager &m)
{
    std::istringstream is(s);
    return unmarshal(is, m);
}
}
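A round-trip sketch for the two functions above; the formula is arbitrary and exists only to exercise both directions.

// marshal_example is illustrative; it prints an integer inequality as SMT2
// text and parses it back as a single conjunction of assertions.
void marshal_example(ast_manager &m) {
    arith_util a(m);
    expr_ref fml(m);
    fml = a.mk_lt(a.mk_numeral(rational(1), true), a.mk_numeral(rational(2), true));
    std::string s = spacer::marshal(fml, m);  // print as SMT2 assertions
    expr_ref back(m);
    back = spacer::unmarshal(s, m);           // parse back into one expression
    TRACE("spacer", tout << mk_pp(back, m) << "\n";);
}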
29
src/muz/spacer/spacer_marshal.h
Normal file
@ -0,0 +1,29 @@
/*++
Copyright (c) 2017 Arie Gurfinkel

Module Name:

    spacer_marshal.h

Abstract:

    marshaling and unmarshaling of expressions

--*/
#ifndef _SPACER_MARSHAL_H_
#define _SPACER_MARSHAL_H_

#include <string>
#include "ast.h"
#include <iostream>

namespace spacer {
std::ostream &marshal(std::ostream &os, expr_ref e, ast_manager &m);
std::string marshal(expr_ref e, ast_manager &m);
expr_ref unmarshal(std::string s, ast_manager &m);
expr_ref unmarshal(std::istream &is, ast_manager &m);
}

#endif
159
src/muz/spacer/spacer_matrix.cpp
Normal file
@ -0,0 +1,159 @@
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_matrix.cpp
|
||||
|
||||
Abstract:
|
||||
a matrix
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
||||
|
||||
Revision History:
|
||||
|
||||
|
||||
--*/
|
||||
#include "spacer_matrix.h"
|
||||
|
||||
namespace spacer
|
||||
{
|
||||
spacer_matrix::spacer_matrix(unsigned m, unsigned n) : m_num_rows(m), m_num_cols(n)
|
||||
{
|
||||
for (unsigned i=0; i < m; ++i)
|
||||
{
|
||||
vector<rational> v;
|
||||
for (unsigned j=0; j < n; ++j)
|
||||
{
|
||||
v.push_back(rational(0));
|
||||
}
|
||||
m_matrix.push_back(v);
|
||||
}
|
||||
}
|
||||
|
||||
unsigned spacer_matrix::num_rows()
|
||||
{
|
||||
return m_num_rows;
|
||||
}
|
||||
|
||||
unsigned spacer_matrix::num_cols()
|
||||
{
|
||||
return m_num_cols;
|
||||
}
|
||||
|
||||
rational spacer_matrix::get(unsigned int i, unsigned int j)
|
||||
{
|
||||
SASSERT(i < m_num_rows);
|
||||
SASSERT(j < m_num_cols);
|
||||
|
||||
return m_matrix[i][j];
|
||||
}
|
||||
|
||||
void spacer_matrix::set(unsigned int i, unsigned int j, rational v)
|
||||
{
|
||||
SASSERT(i < m_num_rows);
|
||||
SASSERT(j < m_num_cols);
|
||||
|
||||
m_matrix[i][j] = v;
|
||||
}
|
||||
|
||||
unsigned spacer_matrix::perform_gaussian_elimination()
|
||||
{
|
||||
unsigned i=0;
|
||||
unsigned j=0;
|
||||
while(i < m_matrix.size() && j < m_matrix[0].size())
|
||||
{
|
||||
// find maximal element in column with row index bigger or equal i
|
||||
rational max = m_matrix[i][j];
|
||||
unsigned max_index = i;
|
||||
|
||||
for (unsigned k=i+1; k < m_matrix.size(); ++k)
|
||||
{
|
||||
if (max < m_matrix[k][j])
|
||||
{
|
||||
max = m_matrix[k][j];
|
||||
max_index = k;
|
||||
}
|
||||
}
|
||||
|
||||
if (max.is_zero()) // skip this column
|
||||
{
|
||||
++j;
|
||||
}
|
||||
else
|
||||
{
|
||||
// reorder rows if necessary
|
||||
vector<rational> tmp = m_matrix[i];
|
||||
m_matrix[i] = m_matrix[max_index];
|
||||
m_matrix[max_index] = tmp;
|
||||
|
||||
// normalize row
|
||||
rational pivot = m_matrix[i][j];
|
||||
if (!pivot.is_one())
|
||||
{
|
||||
for (unsigned k=0; k < m_matrix[i].size(); ++k)
|
||||
{
|
||||
m_matrix[i][k] = m_matrix[i][k] / pivot;
|
||||
}
|
||||
}
|
||||
|
||||
// subtract row from all other rows
|
||||
for (unsigned k=1; k < m_matrix.size(); ++k)
|
||||
{
|
||||
if (k != i)
|
||||
{
|
||||
rational factor = m_matrix[k][j];
|
||||
for (unsigned l=0; l < m_matrix[k].size(); ++l)
|
||||
{
|
||||
m_matrix[k][l] = m_matrix[k][l] - (factor * m_matrix[i][l]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
++i;
|
||||
++j;
|
||||
}
|
||||
}
|
||||
|
||||
if (get_verbosity_level() >= 1)
|
||||
{
|
||||
SASSERT(m_matrix.size() > 0);
|
||||
}
|
||||
|
||||
return i; //i points to the row after the last row which is non-zero
|
||||
}
|
||||
|
||||
void spacer_matrix::print_matrix()
|
||||
{
|
||||
verbose_stream() << "\nMatrix\n";
|
||||
for (const auto& row : m_matrix)
|
||||
{
|
||||
for (const auto& element : row)
|
||||
{
|
||||
verbose_stream() << element << ", ";
|
||||
}
|
||||
verbose_stream() << "\n";
|
||||
}
|
||||
verbose_stream() << "\n";
|
||||
}
|
||||
void spacer_matrix::normalize()
|
||||
{
|
||||
rational den = rational::one();
|
||||
for (unsigned i=0; i < m_num_rows; ++i)
|
||||
{
|
||||
for (unsigned j=0; j < m_num_cols; ++j)
|
||||
{
|
||||
den = lcm(den, denominator(m_matrix[i][j]));
|
||||
}
|
||||
}
|
||||
for (unsigned i=0; i < m_num_rows; ++i)
|
||||
{
|
||||
for (unsigned j=0; j < m_num_cols; ++j)
|
||||
{
|
||||
m_matrix[i][j] = den * m_matrix[i][j];
|
||||
SASSERT(m_matrix[i][j].is_int());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
46
src/muz/spacer/spacer_matrix.h
Normal file
@ -0,0 +1,46 @@
/*++
Copyright (c) 2017 Arie Gurfinkel

Module Name:

    spacer_matrix.h

Abstract:

    a matrix

Author:

    Bernhard Gleiss

Revision History:


--*/
#ifndef _SPACER_MATRIX_H_
#define _SPACER_MATRIX_H_

#include "ast.h"

namespace spacer {

class spacer_matrix {
public:
    spacer_matrix(unsigned m, unsigned n); // m rows, n columns

    unsigned num_rows();
    unsigned num_cols();

    rational get(unsigned i, unsigned j);
    void set(unsigned i, unsigned j, rational v);

    unsigned perform_gaussian_elimination();

    void print_matrix();
    void normalize();
private:
    unsigned m_num_rows;
    unsigned m_num_cols;
    vector<vector<rational>> m_matrix;
};
}

#endif
217
src/muz/spacer/spacer_mev_array.cpp
Normal file
217
src/muz/spacer/spacer_mev_array.cpp
Normal file
|
@ -0,0 +1,217 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
model_mev_array.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Evaluate array expressions in a given model.
|
||||
|
||||
Author:
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
#include"model.h"
|
||||
#include"model_evaluator_params.hpp"
|
||||
#include"rewriter_types.h"
|
||||
#include"model_evaluator.h"
|
||||
#include"spacer_mev_array.h"
|
||||
#include"bool_rewriter.h"
|
||||
#include"arith_rewriter.h"
|
||||
#include"bv_rewriter.h"
|
||||
#include"datatype_rewriter.h"
|
||||
#include"array_rewriter.h"
|
||||
#include"rewriter_def.h"
|
||||
#include"cooperate.h"
|
||||
#include"ast_pp.h"
|
||||
#include"func_interp.h"
|
||||
|
||||
|
||||
|
||||
// model_evaluator_array_util
|
||||
|
||||
|
||||
void model_evaluator_array_util::eval_exprs(model& mdl, expr_ref_vector& es) {
|
||||
for (unsigned j = 0; j < es.size(); ++j) {
|
||||
if (m_array.is_as_array(es.get (j))) {
|
||||
expr_ref r (m);
|
||||
eval(mdl, es.get (j), r);
|
||||
es.set (j, r);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool model_evaluator_array_util::extract_array_func_interp(model& mdl, expr* a, vector<expr_ref_vector>& stores, expr_ref& else_case) {
|
||||
SASSERT(m_array.is_array(a));
|
||||
|
||||
TRACE("model_evaluator", tout << mk_pp(a, m) << "\n";);
|
||||
while (m_array.is_store(a)) {
|
||||
expr_ref_vector store(m);
|
||||
store.append(to_app(a)->get_num_args()-1, to_app(a)->get_args()+1);
|
||||
eval_exprs(mdl, store);
|
||||
stores.push_back(store);
|
||||
a = to_app(a)->get_arg(0);
|
||||
}
|
||||
|
||||
if (m_array.is_const(a)) {
|
||||
else_case = to_app(a)->get_arg(0);
|
||||
return true;
|
||||
}
|
||||
|
||||
while (m_array.is_as_array(a)) {
|
||||
func_decl* f = m_array.get_as_array_func_decl(to_app(a));
|
||||
func_interp* g = mdl.get_func_interp(f);
|
||||
unsigned sz = g->num_entries();
|
||||
unsigned arity = f->get_arity();
|
||||
for (unsigned i = 0; i < sz; ++i) {
|
||||
expr_ref_vector store(m);
|
||||
func_entry const* fe = g->get_entry(i);
|
||||
store.append(arity, fe->get_args());
|
||||
store.push_back(fe->get_result());
|
||||
for (unsigned j = 0; j < store.size(); ++j) {
|
||||
if (!is_ground(store[j].get())) {
|
||||
TRACE("model_evaluator", tout << "could not extract array interpretation: " << mk_pp(a, m) << "\n" << mk_pp(store[j].get(), m) << "\n";);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
eval_exprs(mdl, store);
|
||||
stores.push_back(store);
|
||||
}
|
||||
else_case = g->get_else();
|
||||
if (!else_case) {
|
||||
TRACE("model_evaluator", tout << "no else case " << mk_pp(a, m) << "\n";);
|
||||
return false;
|
||||
}
|
||||
if (!is_ground(else_case)) {
|
||||
TRACE("model_evaluator", tout << "non-ground else case " << mk_pp(a, m) << "\n" << mk_pp(else_case, m) << "\n";);
|
||||
return false;
|
||||
}
|
||||
if (m_array.is_as_array(else_case)) {
|
||||
expr_ref r (m);
|
||||
eval(mdl, else_case, r);
|
||||
else_case = r;
|
||||
}
|
||||
TRACE("model_evaluator", tout << "else case: " << mk_pp(else_case, m) << "\n";);
|
||||
return true;
|
||||
}
|
||||
TRACE("model_evaluator", tout << "no translation: " << mk_pp(a, m) << "\n";);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void model_evaluator_array_util::eval_array_eq(model& mdl, app* e, expr* arg1, expr* arg2, expr_ref& res) {
|
||||
TRACE("model_evaluator", tout << "array equality: " << mk_pp(e, m) << "\n";);
|
||||
expr_ref v1(m), v2(m);
|
||||
eval (mdl, arg1, v1);
|
||||
eval (mdl, arg2, v2);
|
||||
if (v1 == v2) {
|
||||
res = m.mk_true ();
|
||||
return;
|
||||
}
|
||||
sort* s = m.get_sort(arg1);
|
||||
sort* r = get_array_range(s);
|
||||
// give up evaluating finite domain/range arrays
|
||||
if (!r->is_infinite() && !r->is_very_big() && !s->is_infinite() && !s->is_very_big()) {
|
||||
TRACE("model_evaluator", tout << "equality is unknown: " << mk_pp(e, m) << "\n";);
|
||||
res.reset ();
|
||||
return;
|
||||
}
|
||||
vector<expr_ref_vector> store;
|
||||
expr_ref else1(m), else2(m);
|
||||
if (!extract_array_func_interp(mdl, v1, store, else1) ||
|
||||
!extract_array_func_interp(mdl, v2, store, else2)) {
|
||||
TRACE("model_evaluator", tout << "equality is unknown: " << mk_pp(e, m) << "\n";);
|
||||
res.reset ();
|
||||
return;
|
||||
}
|
||||
|
||||
if (else1 != else2) {
|
||||
if (m.is_value(else1) && m.is_value(else2)) {
|
||||
TRACE("model_evaluator", tout
|
||||
<< "defaults are different: " << mk_pp(e, m) << " "
|
||||
<< mk_pp(else1, m) << " " << mk_pp(else2, m) << "\n";);
|
||||
res = m.mk_false ();
|
||||
}
|
||||
else if (m_array.is_array(else1)) {
|
||||
eval_array_eq(mdl, e, else1, else2, res);
|
||||
}
|
||||
else {
|
||||
TRACE("model_evaluator", tout << "equality is unknown: " << mk_pp(e, m) << "\n";);
|
||||
res.reset ();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
expr_ref s1(m), s2(m), w1(m), w2(m);
|
||||
expr_ref_vector args1(m), args2(m);
|
||||
args1.push_back(v1);
|
||||
args2.push_back(v2);
|
||||
for (unsigned i = 0; i < store.size(); ++i) {
|
||||
args1.resize(1);
|
||||
args2.resize(1);
|
||||
args1.append(store[i].size()-1, store[i].c_ptr());
|
||||
args2.append(store[i].size()-1, store[i].c_ptr());
|
||||
s1 = m_array.mk_select(args1.size(), args1.c_ptr());
|
||||
s2 = m_array.mk_select(args2.size(), args2.c_ptr());
|
||||
eval (mdl, s1, w1);
|
||||
eval (mdl, s2, w2);
|
||||
if (w1 == w2) {
|
||||
continue;
|
||||
}
|
||||
if (m.is_value(w1) && m.is_value(w2)) {
|
||||
TRACE("model_evaluator", tout << "Equality evaluation: " << mk_pp(e, m) << "\n";
|
||||
tout << mk_pp(s1, m) << " |-> " << mk_pp(w1, m) << "\n";
|
||||
tout << mk_pp(s2, m) << " |-> " << mk_pp(w2, m) << "\n";);
|
||||
res = m.mk_false ();
|
||||
}
|
||||
else if (m_array.is_array(w1)) {
|
||||
eval_array_eq(mdl, e, w1, w2, res);
|
||||
if (m.is_true (res)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
else {
|
||||
TRACE("model_evaluator", tout << "equality is unknown: " << mk_pp(e, m) << "\n";);
|
||||
res.reset ();
|
||||
}
|
||||
return;
|
||||
}
|
||||
res = m.mk_true ();
|
||||
}
|
||||
|
||||
void model_evaluator_array_util::eval(model& mdl, expr* e, expr_ref& r, bool model_completion) {
|
||||
model_evaluator mev (mdl);
|
||||
mev.set_model_completion (model_completion);
|
||||
bool eval_result = true;
|
||||
try {
|
||||
mev (e, r);
|
||||
}
|
||||
catch (model_evaluator_exception &) {
|
||||
eval_result = false;
|
||||
}
|
||||
VERIFY(eval_result);
|
||||
|
||||
if (m_array.is_array(e)) {
|
||||
vector<expr_ref_vector> stores;
|
||||
expr_ref_vector args(m);
|
||||
expr_ref else_case(m);
|
||||
if (extract_array_func_interp(mdl, r, stores, else_case)) {
|
||||
r = m_array.mk_const_array(m.get_sort(e), else_case);
|
||||
while (!stores.empty() && stores.back().back() == else_case) {
|
||||
stores.pop_back();
|
||||
}
|
||||
for (unsigned i = stores.size(); i > 0; ) {
|
||||
--i;
|
||||
args.resize(1);
|
||||
args[0] = r;
|
||||
args.append(stores[i]);
|
||||
r = m_array.mk_store(args.size(), args.c_ptr());
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
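A hedged sketch of how the array evaluator above is meant to be used (the model, the terms, and the helper function name are placeholders): eval rebuilds an as-array interpretation as explicit stores over a constant array, and eval_array_eq attempts a best-effort extensional equality check.

// Editorial sketch; mdl, a and b are assumed to be a model and two array-sorted terms.
void mev_array_example(ast_manager &m, model &mdl, expr *a, expr *b) {
    model_evaluator_array_util mev(m);

    // rebuild the model value of a as (store (store (const ...) ...) ...)
    expr_ref av(m);
    mev.eval(mdl, a, av);

    // best-effort extensional equality; res stays empty when the equality is undecided
    expr_ref res(m);
    app_ref eq(m.mk_eq(a, b), m);
    mev.eval_array_eq(mdl, eq, a, b, res);
}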
52
src/muz/spacer/spacer_mev_array.h
Normal file
52
src/muz/spacer/spacer_mev_array.h
Normal file
|
@ -0,0 +1,52 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_mev_array.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Utilities to evaluate arrays in the model.
|
||||
|
||||
Author:
|
||||
based on model_evaluator in muz/pdr/pdr_util.h
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
#ifndef _SPACER_MEV_ARRAY_H_
|
||||
#define _SPACER_MEV_ARRAY_H_
|
||||
|
||||
#include"ast.h"
|
||||
#include"rewriter_types.h"
|
||||
#include"params.h"
|
||||
#include "array_decl_plugin.h"
|
||||
|
||||
/**
|
||||
* based on model_evaluator in muz/pdr/pdr_util.h
|
||||
*/
|
||||
class model_evaluator_array_util {
|
||||
ast_manager& m;
|
||||
array_util m_array;
|
||||
|
||||
void eval_exprs(model& mdl, expr_ref_vector& es);
|
||||
|
||||
bool extract_array_func_interp(model& mdl, expr* a, vector<expr_ref_vector>& stores, expr_ref& else_case);
|
||||
|
||||
public:
|
||||
|
||||
model_evaluator_array_util (ast_manager& m):
|
||||
m (m),
|
||||
m_array (m)
|
||||
{}
|
||||
|
||||
/**
|
||||
* best effort evaluator of extensional array equality.
|
||||
*/
|
||||
void eval_array_eq(model& mdl, app* e, expr* arg1, expr* arg2, expr_ref& res);
|
||||
|
||||
void eval(model& mdl, expr* e, expr_ref& r, bool model_completion = true);
|
||||
};
|
||||
|
||||
#endif
|
289
src/muz/spacer/spacer_min_cut.cpp
Normal file
289
src/muz/spacer/spacer_min_cut.cpp
Normal file
|
@ -0,0 +1,289 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_min_cut.cpp
|
||||
|
||||
Abstract:
|
||||
min cut solver
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
||||
|
||||
Revision History:
|
||||
|
||||
|
||||
--*/
|
||||
#include "spacer_min_cut.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
spacer_min_cut::spacer_min_cut()
|
||||
{
|
||||
m_n = 2;
|
||||
|
||||
// push back two empty vectors for source and sink
|
||||
m_edges.push_back(vector<std::pair<unsigned, unsigned>>());
|
||||
m_edges.push_back(vector<std::pair<unsigned, unsigned>>());
|
||||
}
|
||||
|
||||
unsigned spacer_min_cut::new_node()
|
||||
{
|
||||
return m_n++;
|
||||
}
|
||||
|
||||
void spacer_min_cut::add_edge(unsigned int i, unsigned int j, unsigned int capacity)
|
||||
{
|
||||
if (i >= m_edges.size())
|
||||
{
|
||||
m_edges.resize(i + 1);
|
||||
}
|
||||
m_edges[i].insert(std::make_pair(j, 1));
|
||||
STRACE("spacer.mincut",
|
||||
verbose_stream() << "adding edge (" << i << "," << j << ")\n";
|
||||
);
|
||||
|
||||
}
|
||||
|
||||
void spacer_min_cut::compute_min_cut(vector<unsigned>& cut_nodes)
|
||||
{
|
||||
if (m_n == 2)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
m_d.resize(m_n);
|
||||
m_pred.resize(m_n);
|
||||
|
||||
// compute initial distances and number of nodes
|
||||
compute_initial_distances();
|
||||
|
||||
unsigned i = 0;
|
||||
|
||||
while (m_d[0] < m_n)
|
||||
{
|
||||
unsigned j = get_admissible_edge(i);
|
||||
|
||||
if (j < m_n)
|
||||
{
|
||||
// advance(i)
|
||||
m_pred[j] = i;
|
||||
i = j;
|
||||
|
||||
// if i is the sink, augment path
|
||||
if (i == 1)
|
||||
{
|
||||
augment_path();
|
||||
i = 0;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// retreat
|
||||
compute_distance(i);
|
||||
if (i != 0)
|
||||
{
|
||||
i = m_pred[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// split nodes into reachable and unreachable ones
|
||||
vector<bool> reachable(m_n);
|
||||
compute_reachable_nodes(reachable);
|
||||
|
||||
// find all edges between reachable and unreachable nodes and for each such edge, add corresponding lemma to unsat-core
|
||||
compute_cut_and_add_lemmas(reachable, cut_nodes);
|
||||
}
|
||||
|
||||
void spacer_min_cut::compute_initial_distances()
|
||||
{
|
||||
vector<unsigned> todo;
|
||||
vector<bool> visited(m_n);
|
||||
|
||||
todo.push_back(0); // start at the source, since we do postorder traversal
|
||||
|
||||
while (!todo.empty())
|
||||
{
|
||||
unsigned current = todo.back();
|
||||
|
||||
// if we haven't already visited current
|
||||
if (!visited[current]) {
|
||||
bool existsUnvisitedParent = false;
|
||||
|
||||
// add unprocessed parents to stack for DFS. If there is at least one unprocessed parent, don't compute the result
|
||||
// for current now, but wait until those unprocessed parents are processed.
|
||||
for (unsigned i = 0, sz = m_edges[current].size(); i < sz; ++i)
|
||||
{
|
||||
unsigned parent = m_edges[current][i].first;
|
||||
|
||||
// if we haven't visited the current parent yet
|
||||
if(!visited[parent])
|
||||
{
|
||||
// add it to the stack
|
||||
todo.push_back(parent);
|
||||
existsUnvisitedParent = true;
|
||||
}
|
||||
}
|
||||
|
||||
// if we already visited all parents, we can visit current too
|
||||
if (!existsUnvisitedParent) {
|
||||
visited[current] = true;
|
||||
todo.pop_back();
|
||||
|
||||
compute_distance(current); // I.H. all parent distances are already computed
|
||||
}
|
||||
}
|
||||
else {
|
||||
todo.pop_back();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsigned spacer_min_cut::get_admissible_edge(unsigned i)
|
||||
{
|
||||
for (const auto& pair : m_edges[i])
|
||||
{
|
||||
if (pair.second > 0 && m_d[i] == m_d[pair.first] + 1)
|
||||
{
|
||||
return pair.first;
|
||||
}
|
||||
}
|
||||
return m_n; // no element found
|
||||
}
|
||||
|
||||
void spacer_min_cut::augment_path()
|
||||
{
|
||||
// find bottleneck capacity
|
||||
unsigned max = std::numeric_limits<unsigned int>::max();
|
||||
unsigned k = 1;
|
||||
while (k != 0)
|
||||
{
|
||||
unsigned l = m_pred[k];
|
||||
for (const auto& pair : m_edges[l])
|
||||
{
|
||||
if (pair.first == k)
|
||||
{
|
||||
if (max > pair.second)
|
||||
{
|
||||
max = pair.second;
|
||||
}
|
||||
}
|
||||
}
|
||||
k = l;
|
||||
}
|
||||
|
||||
k = 1;
|
||||
while (k != 0)
|
||||
{
|
||||
unsigned l = m_pred[k];
|
||||
|
||||
// decrease capacity
|
||||
for (auto& pair : m_edges[l])
|
||||
{
|
||||
if (pair.first == k)
|
||||
{
|
||||
pair.second -= max;
|
||||
}
|
||||
}
|
||||
// increase reverse flow
|
||||
bool already_exists = false;
|
||||
for (auto& pair : m_edges[k])
|
||||
{
|
||||
if (pair.first == l)
|
||||
{
|
||||
already_exists = true;
|
||||
pair.second += max;
|
||||
}
|
||||
}
|
||||
if (!already_exists)
|
||||
{
|
||||
m_edges[k].insert(std::make_pair(l, max));
|
||||
}
|
||||
k = l;
|
||||
}
|
||||
}
|
||||
|
||||
void spacer_min_cut::compute_distance(unsigned i)
|
||||
{
|
||||
if (i == 1) // sink node
|
||||
{
|
||||
m_d[1] = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
unsigned min = std::numeric_limits<unsigned int>::max();
|
||||
|
||||
// find edge (i,j) with positive residual capacity and smallest distance
|
||||
for (const auto& pair : m_edges[i])
|
||||
{
|
||||
if (pair.second > 0)
|
||||
{
|
||||
unsigned tmp = m_d[pair.first] + 1;
|
||||
if (tmp < min)
|
||||
{
|
||||
min = tmp;
|
||||
}
|
||||
}
|
||||
}
|
||||
m_d[i] = min;
|
||||
}
|
||||
}
|
||||
|
||||
void spacer_min_cut::compute_reachable_nodes(vector<bool>& reachable)
|
||||
{
|
||||
vector<unsigned> todo;
|
||||
|
||||
todo.push_back(0);
|
||||
while (!todo.empty())
|
||||
{
|
||||
unsigned current = todo.back();
|
||||
todo.pop_back();
|
||||
|
||||
if (!reachable[current])
|
||||
{
|
||||
reachable[current] = true;
|
||||
|
||||
for (const auto& pair : m_edges[current])
|
||||
{
|
||||
if (pair.second > 0)
|
||||
{
|
||||
todo.push_back(pair.first);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void spacer_min_cut::compute_cut_and_add_lemmas(vector<bool>& reachable, vector<unsigned>& cut_nodes)
|
||||
{
|
||||
vector<unsigned> todo;
|
||||
vector<bool> visited(m_n);
|
||||
|
||||
todo.push_back(0);
|
||||
while (!todo.empty())
|
||||
{
|
||||
unsigned current = todo.back();
|
||||
todo.pop_back();
|
||||
|
||||
if (!visited[current])
|
||||
{
|
||||
visited[current] = true;
|
||||
|
||||
for (const auto& pair : m_edges[current])
|
||||
{
|
||||
unsigned successor = pair.first;
|
||||
if (reachable[successor])
|
||||
{
|
||||
todo.push_back(successor);
|
||||
}
|
||||
else
|
||||
{
|
||||
cut_nodes.push_back(successor);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
52
src/muz/spacer/spacer_min_cut.h
Normal file
52
src/muz/spacer/spacer_min_cut.h
Normal file
|
@ -0,0 +1,52 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_min_cut.h
|
||||
|
||||
Abstract:
|
||||
min cut solver
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
||||
|
||||
Revision History:
|
||||
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_MIN_CUT_H_
|
||||
#define _SPACER_MIN_CUT_H_
|
||||
|
||||
#include "ast.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
class spacer_min_cut {
|
||||
public:
|
||||
spacer_min_cut();
|
||||
|
||||
unsigned new_node();
|
||||
void add_edge(unsigned i, unsigned j, unsigned capacity);
|
||||
void compute_min_cut(vector<unsigned>& cut_nodes);
|
||||
|
||||
private:
|
||||
|
||||
unsigned m_n; // number of vertices in the graph
|
||||
|
||||
vector<vector<std::pair<unsigned, unsigned> > > m_edges; // map from node to all outgoing edges together with their weights (also contains "reverse edges")
|
||||
vector<unsigned> m_d; // approximation of distance from node to sink in residual graph
|
||||
vector<unsigned> m_pred; // predecessor-information for reconstruction of augmenting path
|
||||
vector<expr*> m_node_to_formula; // maps each node to the corresponding formula in the original proof
|
||||
|
||||
void compute_initial_distances();
|
||||
unsigned get_admissible_edge(unsigned i);
|
||||
void augment_path();
|
||||
void compute_distance(unsigned i);
|
||||
void compute_reachable_nodes(vector<bool>& reachable);
|
||||
void compute_cut_and_add_lemmas(vector<bool>& reachable, vector<unsigned>& cut_nodes);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
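A hedged usage sketch of the min-cut interface declared above (node ids and the resulting cut are illustrative): node 0 is the source and node 1 the sink by construction, and new_node() hands out further ids.

// Editorial sketch; unit capacities, as in add_edge above.
void min_cut_example() {
    spacer::spacer_min_cut mc;

    unsigned a = mc.new_node();   // node 2
    unsigned b = mc.new_node();   // node 3

    mc.add_edge(0, a, 1);         // source -> a
    mc.add_edge(0, b, 1);         // source -> b
    mc.add_edge(a, 1, 1);         // a -> sink
    mc.add_edge(b, 1, 1);         // b -> sink

    vector<unsigned> cut;
    mc.compute_min_cut(cut);      // both source edges saturate, so cut == { a, b }
}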
231
src/muz/spacer/spacer_notes.txt
Normal file
231
src/muz/spacer/spacer_notes.txt
Normal file
|
@ -0,0 +1,231 @@
|
|||
a queue contains a model_node
|
||||
|
||||
let n = leaves.pop_top ()
|
||||
|
||||
if (!n.has_derivation ())
|
||||
|
||||
if n.pt ().must_reach (n.post ())
|
||||
add parent of n to the leaves
|
||||
return
|
||||
|
||||
check abstract reachability of n
|
||||
|
||||
if must reachable then
|
||||
create new reachability fact for n.pt ()
|
||||
add parent of n to the leaves
|
||||
else if may reachable then
|
||||
create derivation d for n
|
||||
create model_node kid for the top of d
|
||||
add kid to the leaves
|
||||
|
||||
else /* unreachable */
|
||||
create a lemma for n.pt ()
|
||||
p = parent of n
|
||||
p.reset_derivation()
|
||||
add p to the leaves
|
||||
|
||||
else if (n.has_derivation ())
|
||||
|
||||
create next model_node kid for n.get_derivation ()
|
||||
|
||||
if (kid != NULL)
|
||||
add kid to leaves
|
||||
else /* done with the derivation, no more kids */
|
||||
// the derivation is reachable, otherwise it was reset in another branch
|
||||
p = parent of n
|
||||
p.reset_derivation ()
|
||||
add p to the leaves
|
||||
|
||||
|
||||
=================================================================================
|
||||
create derivation for the top of d
|
||||
input:
|
||||
model M,
|
||||
transition relation formula trans with auxiliary variables quantified out
|
||||
sequence of predicates P_i,
|
||||
may and must summaries of P_i
|
||||
=================================================================================
|
||||
|
||||
create first derivation child:
|
||||
input: model
|
||||
|
||||
|
||||
create next derivation child:
|
||||
create new model
|
||||
update trans by computing pre-image over new reachability facts
|
||||
call create next derivation child
|
||||
|
||||
private:
|
||||
create next derivation child using a given model, and starting index
|
||||
|
||||
=========================================================
|
||||
|
||||
create a next model for a derivation
|
||||
|
||||
|
||||
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
||||
|
||||
// an obligation
|
||||
model_node
|
||||
// NULL means root
|
||||
model_node_ref parent
|
||||
model_node_ref_vector kids
|
||||
|
||||
pred_transformer &predicate
|
||||
expr* condition
|
||||
unsigned level
|
||||
unsigned depth
|
||||
// monotonically increasing
|
||||
unsigned id
|
||||
|
||||
bool open;
|
||||
|
||||
|
||||
model_node::close ()
|
||||
open = false
|
||||
for k : kids do k.close ()
|
||||
|
||||
model_search
|
||||
|
||||
model_node_ref root;
|
||||
|
||||
// proof obligations
|
||||
priority_queue m_obligations;
|
||||
model_node_ref m_last_reachable;
|
||||
|
||||
|
||||
bool model_node::operator< (model_node& other)
|
||||
lexicographic order based on
|
||||
level<, depth<, id>
|
||||
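The ordering sketched above can be read as the following comparator (an editorial illustration; field names follow the note, not necessarily the actual implementation):

// smaller level first, then smaller depth, then larger (more recent) id
bool model_node::operator<(const model_node &other) const {
    if (level != other.level) return level < other.level;
    if (depth != other.depth) return depth < other.depth;
    return id > other.id;
}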
|
||||
|
||||
|
||||
assert (!m_last_reachable);
|
||||
while (!m_obligations.empty ())
|
||||
{
|
||||
// propagate reachability as much as possible
|
||||
while (m_last_reachable)
|
||||
{
|
||||
obl = m_last_reachable
|
||||
m_last_reachable.reset ();
|
||||
if (is_root (obl)) return true;
|
||||
if (discharge_obligation (obl.get_parent ()) == l_true)
|
||||
m_last_reachable = obl.get_parent ();
|
||||
}
|
||||
|
||||
// at least one obligation is not closed, otherwise the root was reachable
|
||||
while (m_obligations.top ().is_closed ()) m_obligations.pop ();
|
||||
assert (!m_obligations.empty ());
|
||||
|
||||
// process an obligation
|
||||
assert (!m_last_reachable)
|
||||
obl = m_obligations.top ();
|
||||
switch (discharge_obligation (obl))
|
||||
{
|
||||
case l_true:
|
||||
// if reachable, schedule a reachability round
|
||||
m_last_reachable = m_obligations.top ();
|
||||
m_obligations.pop ();
|
||||
break;
|
||||
case l_false:
|
||||
// if unreachable removed from the queue
|
||||
m_obligations.pop ();
|
||||
/// bump level
|
||||
obl.inc_level ();
|
||||
/// optionally insert back into the queue
|
||||
if (is_active (obl)) m_obligations.push (obl);
|
||||
break;
|
||||
default:
|
||||
assert (m_obligations.top () != obl);
|
||||
}
|
||||
}
|
||||
return false
|
||||
|
||||
/// with priority queue
|
||||
bool is_active (model_node obl) { return level <= m_search.max_level (); }
|
||||
/// without a priority queue. Discharged obligations are dropped
|
||||
bool is_active (model_node obl) { return false; }
|
||||
|
||||
discharge_obligation (model_node obl)
|
||||
{
|
||||
assert (!obl.is_closed ());
|
||||
switch (check_reachability (obl))
|
||||
{
|
||||
case l_true:
|
||||
obl.close ()
|
||||
update reachability facts
|
||||
return l_true;
|
||||
case l_false:
|
||||
update lemmas
|
||||
return l_false
|
||||
case l_unknown:
|
||||
create children
|
||||
populate m_obligations queue
|
||||
return l_unknown
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
=============================================================
|
||||
|
||||
a node keeps a derivation object
|
||||
|
||||
if a node is sat, a new node is constructed and inherits the derivation object
|
||||
if a node is sat and the derivation is done, this is reported to the parent
|
||||
|
||||
expand_node(n):
|
||||
process node ignoring derivation
|
||||
if sat:
|
||||
if concrete:
|
||||
if has derivation and has next child
|
||||
close current node and push new node
|
||||
return l_undef
|
||||
else
|
||||
return l_true
|
||||
else
|
||||
create_child (creates a new node and optionally sets derivation)
|
||||
else if unsat
|
||||
generate lemmas
|
||||
derivation remains unchanged to be used at a higher level
|
||||
return
|
||||
======================================================================
|
||||
1. open disjunction for transition relation
|
||||
- a fresh literal to open the disjunction of the transition relation
|
||||
- expr* expand_init (expr *e) -- add e to initial state and return
|
||||
new disj var
|
||||
- close the disjunction by passing the negation of the literal
|
||||
during various calls
|
||||
- store the literal negated to have access to both positive and
|
||||
negative versions
|
||||
- with this, can do an optional check whether the lemmas alone are
|
||||
strong enough to discharge the counterexample. Easiest is to
|
||||
implement it as a separate pre-check.
|
||||
|
||||
2. auxiliary variables in lemmas and reach-facts.
|
||||
- store and expect auxiliary variables
|
||||
- quantify them out when necessary
|
||||
|
||||
3. initial rules as reach-facts
|
||||
- add initial rules of a predicate to its reach-facts. Propagate them to uses.
|
||||
- this way, the checks at level 0 will include initial rules of
|
||||
immediate predecessors
|
||||
======================================================================
|
||||
|
||||
reach_fact_ref_vector m_reach_facts
|
||||
app_ref_vector m_reach_case_vars
|
||||
|
||||
bool is_must_reachable (expr *state, model_ref *model)
|
||||
reach_fact* get_used_reach_fact (model_evaluator &mev)
|
||||
app* mk_fresh_reach_case_var ()
|
||||
expr* get_reach ()
|
||||
expr* get_last_reach_case_var ()
|
||||
app* get_reach_case_var (unsigned idx)
|
||||
|
||||
get_used_origin_reach_fact():
|
||||
|
||||
|
||||
======================================================================
|
||||
4. track relationship between an obligation and lemmas. Attempt to
|
||||
generalize an obligation into the exact lemma that worked
|
||||
before. Perhaps pick one lemma with highest level? Implement as
|
||||
core-generalizer. Will require reworking how legacy_frames is implemented.
|
332
src/muz/spacer/spacer_proof_utils.cpp
Normal file
332
src/muz/spacer/spacer_proof_utils.cpp
Normal file
|
@ -0,0 +1,332 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_proof_utils.cpp
|
||||
|
||||
Abstract:
|
||||
Utilities to traverse and manipulate proofs
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include "spacer_proof_utils.h"
|
||||
#include "ast_util.h"
|
||||
#include "ast_pp.h"
|
||||
|
||||
#include "proof_checker.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
ProofIteratorPostOrder::ProofIteratorPostOrder(proof* root, ast_manager& manager) : m(manager)
|
||||
{m_todo.push_back(root);}
|
||||
|
||||
bool ProofIteratorPostOrder::hasNext()
|
||||
{return !m_todo.empty();}
|
||||
|
||||
/*
|
||||
* iterative post-order depth-first search (DFS) through the proof DAG
|
||||
*/
|
||||
proof* ProofIteratorPostOrder::next()
|
||||
{
|
||||
while (!m_todo.empty()) {
|
||||
proof* currentNode = m_todo.back();
|
||||
|
||||
// if we haven't already visited the current unit
|
||||
if (!m_visited.is_marked(currentNode)) {
|
||||
bool existsUnvisitedParent = false;
|
||||
|
||||
// add unprocessed premises to stack for DFS. If there is at least one unprocessed premise, don't compute the result
|
||||
// for currentProof now, but wait until those unprocessed premises are processed.
|
||||
for (unsigned i = 0; i < m.get_num_parents(currentNode); ++i) {
|
||||
SASSERT(m.is_proof(currentNode->get_arg(i)));
|
||||
proof* premise = to_app(currentNode->get_arg(i));
|
||||
|
||||
// if we haven't visited the current premise yet
|
||||
if (!m_visited.is_marked(premise)) {
|
||||
// add it to the stack
|
||||
m_todo.push_back(premise);
|
||||
existsUnvisitedParent = true;
|
||||
}
|
||||
}
|
||||
|
||||
// if we already visited all parent-inferences, we can visit the inference too
|
||||
if (!existsUnvisitedParent) {
|
||||
m_visited.mark(currentNode, true);
|
||||
m_todo.pop_back();
|
||||
return currentNode;
|
||||
}
|
||||
} else {
|
||||
m_todo.pop_back();
|
||||
}
|
||||
}
|
||||
// we have already iterated through all inferences
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
||||
class reduce_hypotheses {
|
||||
ast_manager &m;
|
||||
// tracking all created expressions
|
||||
expr_ref_vector m_pinned;
|
||||
|
||||
// cache for the transformation
|
||||
obj_map<proof, proof*> m_cache;
|
||||
|
||||
// map from unit literals to their hypotheses-free derivations
|
||||
obj_map<expr, proof*> m_units;
|
||||
|
||||
// -- all hypotheses in the proof
|
||||
obj_hashtable<expr> m_hyps;
|
||||
|
||||
// marks hypothetical proofs
|
||||
ast_mark m_hypmark;
|
||||
|
||||
|
||||
// stack
|
||||
ptr_vector<proof> m_todo;
|
||||
|
||||
void reset()
|
||||
{
|
||||
m_cache.reset();
|
||||
m_units.reset();
|
||||
m_hyps.reset();
|
||||
m_hypmark.reset();
|
||||
m_pinned.reset();
|
||||
}
|
||||
|
||||
bool compute_mark1(proof *pr)
|
||||
{
|
||||
bool hyp_mark = false;
|
||||
// lemmas clear all hypotheses
|
||||
if (!m.is_lemma(pr)) {
|
||||
for (unsigned i = 0, sz = m.get_num_parents(pr); i < sz; ++i) {
|
||||
if (m_hypmark.is_marked(m.get_parent(pr, i))) {
|
||||
hyp_mark = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
m_hypmark.mark(pr, hyp_mark);
|
||||
return hyp_mark;
|
||||
}
|
||||
|
||||
void compute_marks(proof* pr)
|
||||
{
|
||||
proof *p;
|
||||
ProofIteratorPostOrder pit(pr, m);
|
||||
while (pit.hasNext()) {
|
||||
p = pit.next();
|
||||
if (m.is_hypothesis(p)) {
|
||||
m_hypmark.mark(p, true);
|
||||
m_hyps.insert(m.get_fact(p));
|
||||
} else {
|
||||
bool hyp_mark = compute_mark1(p);
|
||||
// collect units that are hyp-free and are used as hypotheses somewhere
|
||||
if (!hyp_mark && m.has_fact(p) && m_hyps.contains(m.get_fact(p)))
|
||||
{ m_units.insert(m.get_fact(p), p); }
|
||||
}
|
||||
}
|
||||
}
|
||||
void find_units(proof *pr)
|
||||
{
|
||||
// optional. not implemented yet.
|
||||
}
|
||||
|
||||
void reduce(proof* pf, proof_ref &out)
|
||||
{
|
||||
proof *res = NULL;
|
||||
|
||||
m_todo.reset();
|
||||
m_todo.push_back(pf);
|
||||
ptr_buffer<proof> args;
|
||||
bool dirty = false;
|
||||
|
||||
while (!m_todo.empty()) {
|
||||
proof *p, *tmp, *pp;
|
||||
unsigned todo_sz;
|
||||
|
||||
p = m_todo.back();
|
||||
if (m_cache.find(p, tmp)) {
|
||||
res = tmp;
|
||||
m_todo.pop_back();
|
||||
continue;
|
||||
}
|
||||
|
||||
dirty = false;
|
||||
args.reset();
|
||||
todo_sz = m_todo.size();
|
||||
for (unsigned i = 0, sz = m.get_num_parents(p); i < sz; ++i) {
|
||||
pp = m.get_parent(p, i);
|
||||
if (m_cache.find(pp, tmp)) {
|
||||
args.push_back(tmp);
|
||||
dirty = dirty || pp != tmp;
|
||||
} else {
|
||||
m_todo.push_back(pp);
|
||||
}
|
||||
}
|
||||
|
||||
if (todo_sz < m_todo.size()) { continue; }
|
||||
else { m_todo.pop_back(); }
|
||||
|
||||
if (m.is_hypothesis(p)) {
|
||||
// hyp: replace by a corresponding unit
|
||||
if (m_units.find(m.get_fact(p), tmp)) {
|
||||
res = tmp;
|
||||
} else { res = p; }
|
||||
}
|
||||
|
||||
else if (!dirty) { res = p; }
|
||||
|
||||
else if (m.is_lemma(p)) {
|
||||
//lemma: reduce the premise; remove reduced consequences from conclusion
|
||||
SASSERT(args.size() == 1);
|
||||
res = mk_lemma_core(args.get(0), m.get_fact(p));
|
||||
compute_mark1(res);
|
||||
} else if (m.is_unit_resolution(p)) {
|
||||
// unit: reduce units; reduce the first premise; rebuild unit resolution
|
||||
res = mk_unit_resolution_core(args.size(), args.c_ptr());
|
||||
compute_mark1(res);
|
||||
} else {
|
||||
// other: reduce all premises; reapply
|
||||
if (m.has_fact(p)) { args.push_back(to_app(m.get_fact(p))); }
|
||||
SASSERT(p->get_decl()->get_arity() == args.size());
|
||||
res = m.mk_app(p->get_decl(), args.size(), (expr * const*)args.c_ptr());
|
||||
m_pinned.push_back(res);
|
||||
compute_mark1(res);
|
||||
}
|
||||
|
||||
SASSERT(res);
|
||||
m_cache.insert(p, res);
|
||||
|
||||
if (m.has_fact(res) && m.is_false(m.get_fact(res))) { break; }
|
||||
}
|
||||
|
||||
out = res;
|
||||
}
|
||||
|
||||
// returns true if (hypothesis (not a)) would be reduced
|
||||
bool is_reduced(expr *a)
|
||||
{
|
||||
expr_ref e(m);
|
||||
if (m.is_not(a)) { e = to_app(a)->get_arg(0); }
|
||||
else { e = m.mk_not(a); }
|
||||
|
||||
return m_units.contains(e);
|
||||
}
|
||||
proof *mk_lemma_core(proof *pf, expr *fact)
|
||||
{
|
||||
ptr_buffer<expr> args;
|
||||
expr_ref lemma(m);
|
||||
|
||||
if (m.is_or(fact)) {
|
||||
for (unsigned i = 0, sz = to_app(fact)->get_num_args(); i < sz; ++i) {
|
||||
expr *a = to_app(fact)->get_arg(i);
|
||||
if (!is_reduced(a))
|
||||
{ args.push_back(a); }
|
||||
}
|
||||
} else if (!is_reduced(fact))
|
||||
{ args.push_back(fact); }
|
||||
|
||||
|
||||
if (args.size() == 0) { return pf; }
|
||||
else if (args.size() == 1) {
|
||||
lemma = args.get(0);
|
||||
} else {
|
||||
lemma = m.mk_or(args.size(), args.c_ptr());
|
||||
}
|
||||
proof* res = m.mk_lemma(pf, lemma);
|
||||
m_pinned.push_back(res);
|
||||
|
||||
if (m_hyps.contains(lemma))
|
||||
{ m_units.insert(lemma, res); }
|
||||
return res;
|
||||
}
|
||||
|
||||
proof *mk_unit_resolution_core(unsigned num_args, proof* const *args)
|
||||
{
|
||||
|
||||
ptr_buffer<proof> pf_args;
|
||||
pf_args.push_back(args [0]);
|
||||
|
||||
app *cls_fact = to_app(m.get_fact(args[0]));
|
||||
ptr_buffer<expr> cls;
|
||||
if (m.is_or(cls_fact)) {
|
||||
for (unsigned i = 0, sz = cls_fact->get_num_args(); i < sz; ++i)
|
||||
{ cls.push_back(cls_fact->get_arg(i)); }
|
||||
} else { cls.push_back(cls_fact); }
|
||||
|
||||
// construct new resolvent
|
||||
ptr_buffer<expr> new_fact_cls;
|
||||
bool found;
|
||||
// XXX quadratic
|
||||
for (unsigned i = 0, sz = cls.size(); i < sz; ++i) {
|
||||
found = false;
|
||||
for (unsigned j = 1; j < num_args; ++j) {
|
||||
if (m.is_complement(cls.get(i), m.get_fact(args [j]))) {
|
||||
found = true;
|
||||
pf_args.push_back(args [j]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
new_fact_cls.push_back(cls.get(i));
|
||||
}
|
||||
}
|
||||
|
||||
SASSERT(new_fact_cls.size() + pf_args.size() - 1 == cls.size());
|
||||
expr_ref new_fact(m);
|
||||
new_fact = mk_or(m, new_fact_cls.size(), new_fact_cls.c_ptr());
|
||||
|
||||
// create new proof step
|
||||
proof *res = m.mk_unit_resolution(pf_args.size(), pf_args.c_ptr(), new_fact);
|
||||
m_pinned.push_back(res);
|
||||
return res;
|
||||
}
|
||||
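For intuition, a hedged example of what mk_unit_resolution_core above computes (the clause and unit facts are illustrative, not taken from the commit): every literal of the first premise's clause whose complement is proved by one of the remaining premises is resolved away.

// Editorial sketch; pr_clause, pr_na, pr_nc are assumed proofs of (or a b c), (not a), (not c).
// proof *parents[3] = { pr_clause, pr_na, pr_nc };
// proof *res = mk_unit_resolution_core(3, parents);
// m.get_fact(res) is b -- the only literal of the clause without a complementary unit.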
|
||||
// reduce all units, if any unit reduces to false return true and put its proof into out
|
||||
bool reduce_units(proof_ref &out)
|
||||
{
|
||||
proof_ref res(m);
|
||||
for (auto entry : m_units) {
|
||||
reduce(entry.get_value(), res);
|
||||
if (m.is_false(m.get_fact(res))) {
|
||||
out = res;
|
||||
return true;
|
||||
}
|
||||
res.reset();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
public:
|
||||
reduce_hypotheses(ast_manager &m) : m(m), m_pinned(m) {}
|
||||
|
||||
|
||||
void operator()(proof_ref &pr)
|
||||
{
|
||||
compute_marks(pr);
|
||||
if (!reduce_units(pr)) {
|
||||
reduce(pr.get(), pr);
|
||||
}
|
||||
reset();
|
||||
}
|
||||
};
|
||||
void reduce_hypotheses(proof_ref &pr)
|
||||
{
|
||||
ast_manager &m = pr.get_manager();
|
||||
class reduce_hypotheses hypred(m);
|
||||
hypred(pr);
|
||||
DEBUG_CODE(proof_checker pc(m);
|
||||
expr_ref_vector side(m);
|
||||
SASSERT(pc.check(pr, side));
|
||||
);
|
||||
}
|
||||
}
|
43
src/muz/spacer/spacer_proof_utils.h
Normal file
43
src/muz/spacer/spacer_proof_utils.h
Normal file
|
@ -0,0 +1,43 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_proof_utils.cpp
|
||||
|
||||
Abstract:
|
||||
Utilities to traverse and manipulate proofs
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_PROOF_UTILS_H_
|
||||
#define _SPACER_PROOF_UTILS_H_
|
||||
#include "ast.h"
|
||||
|
||||
namespace spacer {
|
||||
/*
|
||||
* iterator, which traverses the proof in depth-first post-order.
|
||||
*/
|
||||
class ProofIteratorPostOrder {
|
||||
public:
|
||||
ProofIteratorPostOrder(proof* refutation, ast_manager& manager);
|
||||
bool hasNext();
|
||||
proof* next();
|
||||
|
||||
private:
|
||||
ptr_vector<proof> m_todo;
|
||||
ast_mark m_visited; // the proof nodes we have already visited
|
||||
|
||||
ast_manager& m;
|
||||
};
|
||||
|
||||
|
||||
void reduce_hypotheses(proof_ref &pr);
|
||||
}
|
||||
#endif
|
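A hedged sketch of how the two entry points above fit together (the proof pr and the helper function name are placeholders):

// Editorial sketch; pr is assumed to hold a refutation produced with proofs enabled.
void proof_utils_example(ast_manager &m, proof_ref &pr) {
    // post-order traversal: premises are returned before the steps that use them
    spacer::ProofIteratorPostOrder pit(pr.get(), m);
    while (pit.hasNext()) {
        proof *step = pit.next();
        if (m.has_fact(step)) {
            expr *concl = m.get_fact(step);   // conclusion of this inference
            (void) concl;
        }
    }

    // replace hypothesis steps by hypothesis-free unit derivations where possible
    spacer::reduce_hypotheses(pr);
}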
298
src/muz/spacer/spacer_prop_solver.cpp
Normal file
298
src/muz/spacer/spacer_prop_solver.cpp
Normal file
|
@ -0,0 +1,298 @@
|
|||
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_prop_solver.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
SAT solver abstraction for SPACER.
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
Anvesh Komuravelli
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include <sstream>
|
||||
#include "model.h"
|
||||
#include "spacer_util.h"
|
||||
#include "spacer_prop_solver.h"
|
||||
#include "ast_smt2_pp.h"
|
||||
#include "dl_util.h"
|
||||
#include "model_pp.h"
|
||||
#include "smt_params.h"
|
||||
#include "datatype_decl_plugin.h"
|
||||
#include "bv_decl_plugin.h"
|
||||
#include "spacer_farkas_learner.h"
|
||||
#include "ast_smt2_pp.h"
|
||||
#include "expr_replacer.h"
|
||||
#include "fixedpoint_params.hpp"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
prop_solver::prop_solver(manager& pm, fixedpoint_params const& p, symbol const& name) :
|
||||
m(pm.get_manager()),
|
||||
m_pm(pm),
|
||||
m_name(name),
|
||||
m_ctx(NULL),
|
||||
m_pos_level_atoms(m),
|
||||
m_neg_level_atoms(m),
|
||||
m_core(0),
|
||||
m_subset_based_core(false),
|
||||
m_uses_level(infty_level()),
|
||||
m_delta_level(false),
|
||||
m_in_level(false),
|
||||
m_use_push_bg(p.spacer_keep_proxy())
|
||||
{
|
||||
|
||||
m_solvers[0] = pm.mk_fresh();
|
||||
m_fparams[0] = &pm.fparams();
|
||||
|
||||
m_solvers[1] = pm.mk_fresh2();
|
||||
m_fparams[1] = &pm.fparams2();
|
||||
|
||||
m_contexts[0] = alloc(spacer::itp_solver, *(m_solvers[0]), p.spacer_new_unsat_core(), p.spacer_minimize_unsat_core(), p.spacer_farkas_optimized(), p.spacer_farkas_a_const(), p.spacer_split_farkas_literals());
|
||||
m_contexts[1] = alloc(spacer::itp_solver, *(m_solvers[1]), p.spacer_new_unsat_core(), p.spacer_minimize_unsat_core(), p.spacer_farkas_optimized(), p.spacer_farkas_a_const(), p.spacer_split_farkas_literals());
|
||||
|
||||
for (unsigned i = 0; i < 2; ++i)
|
||||
{ m_contexts[i]->assert_expr(m_pm.get_background()); }
|
||||
}
|
||||
|
||||
void prop_solver::add_level()
|
||||
{
|
||||
unsigned idx = level_cnt();
|
||||
std::stringstream name;
|
||||
name << m_name << "#level_" << idx;
|
||||
func_decl * lev_pred = m.mk_fresh_func_decl(name.str().c_str(), 0, 0, m.mk_bool_sort());
|
||||
m_level_preds.push_back(lev_pred);
|
||||
|
||||
app_ref pos_la(m.mk_const(lev_pred), m);
|
||||
app_ref neg_la(m.mk_not(pos_la.get()), m);
|
||||
|
||||
m_pos_level_atoms.push_back(pos_la);
|
||||
m_neg_level_atoms.push_back(neg_la);
|
||||
|
||||
m_level_atoms_set.insert(pos_la.get());
|
||||
m_level_atoms_set.insert(neg_la.get());
|
||||
}
|
||||
|
||||
void prop_solver::ensure_level(unsigned lvl)
|
||||
{
|
||||
while (lvl >= level_cnt()) {
|
||||
add_level();
|
||||
}
|
||||
}
|
||||
|
||||
unsigned prop_solver::level_cnt() const
|
||||
{
|
||||
return m_level_preds.size();
|
||||
}
|
||||
|
||||
void prop_solver::assert_level_atoms(unsigned level)
|
||||
{
|
||||
unsigned lev_cnt = level_cnt();
|
||||
for (unsigned i = 0; i < lev_cnt; i++) {
|
||||
bool active = m_delta_level ? i == level : i >= level;
|
||||
app * lev_atom =
|
||||
active ? m_neg_level_atoms.get(i) : m_pos_level_atoms.get(i);
|
||||
m_ctx->push_bg(lev_atom);
|
||||
}
|
||||
}
|
||||
|
||||
void prop_solver::assert_expr(expr * form)
|
||||
{
|
||||
SASSERT(!m_in_level);
|
||||
m_contexts[0]->assert_expr(form);
|
||||
m_contexts[1]->assert_expr(form);
|
||||
IF_VERBOSE(21, verbose_stream() << "$ asserted " << mk_pp(form, m) << "\n";);
|
||||
TRACE("spacer", tout << "add_formula: " << mk_pp(form, m) << "\n";);
|
||||
}
|
||||
|
||||
void prop_solver::assert_expr(expr * form, unsigned level)
|
||||
{
|
||||
ensure_level(level);
|
||||
app * lev_atom = m_pos_level_atoms[level].get();
|
||||
app_ref lform(m.mk_or(form, lev_atom), m);
|
||||
assert_expr(lform);
|
||||
}
|
||||
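How the level gating above plays out, as a hedged illustration (the scenario is made up; names follow the code):

// Editorial sketch. Suppose ensure_level(2) has run, so level atoms lvl_0, lvl_1, lvl_2 exist.
//
//   assert_expr(F, 2)        records the clause   (or F lvl_2)
//
//   assert_level_atoms(1)    (non-delta mode) pushes as background:
//       (not lvl_1), (not lvl_2)   -- levels >= 1 are active, their guards are falsified
//       lvl_0                      -- level 0 is inactive, its guarded clauses hold trivially
//
// Under these assumptions the solver must satisfy F when checking at level 1,
// while facts guarded only by lvl_0 impose no constraint.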
|
||||
|
||||
/// Poor man's maxsat. No guarantees of maximum solution
|
||||
/// Runs maxsat loop on m_ctx Returns l_false if hard is unsat,
|
||||
/// otherwise reduces soft such that hard & soft is sat.
|
||||
lbool prop_solver::maxsmt(expr_ref_vector &hard, expr_ref_vector &soft)
|
||||
{
|
||||
// replace expressions by assumption literals
|
||||
itp_solver::scoped_mk_proxy _p_(*m_ctx, hard);
|
||||
unsigned hard_sz = hard.size();
|
||||
// assume soft constraints are propositional literals (no need to proxy)
|
||||
hard.append(soft);
|
||||
|
||||
lbool res = m_ctx->check_sat(hard.size(), hard.c_ptr());
|
||||
// if hard constraints alone are unsat or there are no soft
|
||||
// constraints, we are done
|
||||
if (res != l_false || soft.empty()) { return res; }
|
||||
|
||||
// clear soft constraints, we will recompute them later
|
||||
soft.reset();
|
||||
|
||||
expr_ref saved(m);
|
||||
ptr_vector<expr> core;
|
||||
m_ctx->get_unsat_core(core);
|
||||
|
||||
// while there are soft constraints
|
||||
while (hard.size() > hard_sz) {
|
||||
bool found = false;
|
||||
// look for a soft constraint that is in the unsat core
|
||||
for (unsigned i = hard_sz, sz = hard.size(); i < sz; ++i)
|
||||
if (core.contains(hard.get(i))) {
|
||||
found = true;
|
||||
// AG: not sure why we are saving it
|
||||
saved = hard.get(i);
|
||||
hard[i] = hard.back();
|
||||
hard.pop_back();
|
||||
break;
|
||||
}
|
||||
// if no soft constraints are in the core, return; this should
|
||||
// not happen, because it would imply that hard alone is unsat
|
||||
// and that is taken care of earlier
|
||||
if (!found) {
|
||||
hard.resize(hard_sz);
|
||||
return l_false;
|
||||
}
|
||||
|
||||
// check that the NEW constraints became sat
|
||||
res = m_ctx->check_sat(hard.size(), hard.c_ptr());
|
||||
if (res != l_false) { break; }
|
||||
// still unsat, update the core and repeat
|
||||
core.reset();
|
||||
m_ctx->get_unsat_core(core);
|
||||
}
|
||||
|
||||
// update soft with found soft constraints
|
||||
if (res == l_true) {
|
||||
for (unsigned i = hard_sz, sz = hard.size(); i < sz; ++i)
|
||||
{ soft.push_back(hard.get(i)); }
|
||||
}
|
||||
// revert hard back to the right size
|
||||
// proxies are undone on exit via scoped_mk_proxy
|
||||
hard.resize(hard_sz);
|
||||
return res;
|
||||
}
|
||||
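A hedged illustration of the maxsat loop above (the formulas are placeholders): hard constraints are kept, and soft literals that show up in an unsat core are dropped one at a time until the rest is satisfiable.

// Editorial sketch; a, b, c are assumed boolean constants.
//
//   hard = { a, (or (not a) b) }          // i.e. a and a -> b
//   soft = { (not b), c }
//
//   maxsmt(hard, soft):
//     check hard + soft       -> unsat, core contains (not b)
//     drop (not b) from soft  -> check hard + { c } -> sat
//     result: l_true, soft reduced to { c }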
|
||||
lbool prop_solver::internal_check_assumptions(
|
||||
expr_ref_vector& hard_atoms,
|
||||
expr_ref_vector& soft_atoms)
|
||||
{
|
||||
// XXX Turn on model generation if m_model != 0
|
||||
SASSERT(m_ctx);
|
||||
SASSERT(m_ctx_fparams);
|
||||
flet<bool> _model(m_ctx_fparams->m_model, m_model != 0);
|
||||
|
||||
if (m_in_level) { assert_level_atoms(m_current_level); }
|
||||
lbool result = maxsmt(hard_atoms, soft_atoms);
|
||||
if (result != l_false && m_model) { m_ctx->get_model(*m_model); }
|
||||
|
||||
SASSERT(result != l_false || soft_atoms.empty());
|
||||
|
||||
/// compute level used in the core
|
||||
// XXX this is a poor approximation because the core will get minimized further
|
||||
if (result == l_false) {
|
||||
ptr_vector<expr> core;
|
||||
m_ctx->get_full_unsat_core(core);
|
||||
unsigned core_size = core.size();
|
||||
m_uses_level = infty_level();
|
||||
|
||||
for (unsigned i = 0; i < core_size; ++i) {
|
||||
if (m_level_atoms_set.contains(core[i])) {
|
||||
unsigned sz = std::min(m_uses_level, m_neg_level_atoms.size());
|
||||
for (unsigned j = 0; j < sz; ++j)
|
||||
if (m_neg_level_atoms [j].get() == core[i]) {
|
||||
m_uses_level = j;
|
||||
break;
|
||||
}
|
||||
SASSERT(!is_infty_level(m_uses_level));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (result == l_false && m_core && m.proofs_enabled() && !m_subset_based_core) {
|
||||
TRACE("spacer", tout << "theory core\n";);
|
||||
m_core->reset();
|
||||
m_ctx->get_itp_core(*m_core);
|
||||
} else if (result == l_false && m_core) {
|
||||
m_core->reset();
|
||||
m_ctx->get_unsat_core(*m_core);
|
||||
// manually undo proxies because maxsmt() call above manually adds proxies
|
||||
m_ctx->undo_proxies(*m_core);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
|
||||
lbool prop_solver::check_assumptions(const expr_ref_vector & _hard,
|
||||
expr_ref_vector& soft,
|
||||
unsigned num_bg, expr * const * bg,
|
||||
unsigned solver_id)
|
||||
{
|
||||
// current clients expect that flattening of HARD is
|
||||
// done implicitly during check_assumptions
|
||||
expr_ref_vector hard(m);
|
||||
hard.append(_hard.size(), _hard.c_ptr());
|
||||
flatten_and(hard);
|
||||
|
||||
m_ctx = m_contexts [solver_id == 0 ? 0 : 0 /* 1 */].get();
|
||||
m_ctx_fparams = m_fparams [solver_id == 0 ? 0 : 0 /* 1 */];
|
||||
|
||||
// can be disabled if use_push_bg == true
|
||||
// solver::scoped_push _s_(*m_ctx);
|
||||
if (!m_use_push_bg) { m_ctx->push(); }
|
||||
itp_solver::scoped_bg _b_(*m_ctx);
|
||||
|
||||
for (unsigned i = 0; i < num_bg; ++i)
|
||||
if (m_use_push_bg) { m_ctx->push_bg(bg [i]); }
|
||||
else { m_ctx->assert_expr(bg[i]); }
|
||||
|
||||
unsigned soft_sz = soft.size();
|
||||
#pragma unused(soft_sz)
|
||||
lbool res = internal_check_assumptions(hard, soft);
|
||||
if (!m_use_push_bg) { m_ctx->pop(1); }
|
||||
|
||||
TRACE("psolve_verbose",
|
||||
tout << "sat: " << mk_pp(mk_and(hard), m) << "\n"
|
||||
<< mk_pp(mk_and(soft), m) << "\n";
|
||||
for (unsigned i = 0; i < num_bg; ++i)
|
||||
tout << "bg" << i << ": " << mk_pp(bg[i], m) << "\n";
|
||||
tout << "res: " << res << "\n";);
|
||||
CTRACE("psolve", m_core,
|
||||
tout << "core is: " << mk_pp(mk_and(*m_core), m) << "\n";);
|
||||
|
||||
SASSERT(soft_sz >= soft.size());
|
||||
|
||||
// -- reset all parameters
|
||||
m_core = 0;
|
||||
m_model = 0;
|
||||
m_subset_based_core = false;
|
||||
return res;
|
||||
}
|
||||
|
||||
void prop_solver::collect_statistics(statistics& st) const
|
||||
{
|
||||
m_contexts[0]->collect_statistics(st);
|
||||
m_contexts[1]->collect_statistics(st);
|
||||
}
|
||||
|
||||
void prop_solver::reset_statistics()
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
143
src/muz/spacer/spacer_prop_solver.h
Normal file
143
src/muz/spacer/spacer_prop_solver.h
Normal file
|
@ -0,0 +1,143 @@
|
|||
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_prop_solver.h
|
||||
|
||||
Abstract:
|
||||
|
||||
SAT solver abstraction for SPACER.
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _PROP_SOLVER_H_
|
||||
#define _PROP_SOLVER_H_
|
||||
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include "ast.h"
|
||||
#include "obj_hashtable.h"
|
||||
#include "smt_kernel.h"
|
||||
#include "util.h"
|
||||
#include "vector.h"
|
||||
#include "spacer_manager.h"
|
||||
#include "spacer_smt_context_manager.h"
|
||||
#include "spacer_itp_solver.h"
|
||||
|
||||
struct fixedpoint_params;
|
||||
|
||||
namespace spacer {
|
||||
|
||||
class prop_solver {
|
||||
|
||||
private:
|
||||
ast_manager& m;
|
||||
manager& m_pm;
|
||||
symbol m_name;
|
||||
smt_params* m_fparams[2];
|
||||
solver* m_solvers[2];
|
||||
scoped_ptr<itp_solver> m_contexts[2];
|
||||
itp_solver * m_ctx;
|
||||
smt_params * m_ctx_fparams;
|
||||
decl_vector m_level_preds;
|
||||
app_ref_vector m_pos_level_atoms; // atoms used to identify level
|
||||
app_ref_vector m_neg_level_atoms; //
|
||||
obj_hashtable<expr> m_level_atoms_set;
|
||||
expr_ref_vector* m_core;
|
||||
model_ref* m_model;
|
||||
bool m_subset_based_core;
|
||||
unsigned m_uses_level;
|
||||
/// if true sets the solver into a delta level, enabling only
|
||||
/// atoms explicitly asserted in m_current_level
|
||||
bool m_delta_level;
|
||||
bool m_in_level;
|
||||
bool m_use_push_bg;
|
||||
unsigned m_current_level; // set when m_in_level
|
||||
|
||||
void assert_level_atoms(unsigned level);
|
||||
|
||||
void ensure_level(unsigned lvl);
|
||||
|
||||
lbool internal_check_assumptions(expr_ref_vector &hard,
|
||||
expr_ref_vector &soft);
|
||||
|
||||
lbool maxsmt(expr_ref_vector &hard, expr_ref_vector &soft);
|
||||
|
||||
|
||||
public:
|
||||
prop_solver(spacer::manager& pm, fixedpoint_params const& p, symbol const& name);
|
||||
|
||||
|
||||
void set_core(expr_ref_vector* core) { m_core = core; }
|
||||
void set_model(model_ref* mdl) { m_model = mdl; }
|
||||
void set_subset_based_core(bool f) { m_subset_based_core = f; }
|
||||
bool assumes_level() const { return !is_infty_level(m_uses_level); }
|
||||
unsigned uses_level() const {return m_uses_level;}
|
||||
|
||||
|
||||
void add_level();
|
||||
unsigned level_cnt() const;
|
||||
|
||||
|
||||
void assert_expr(expr * form);
|
||||
void assert_expr(expr * form, unsigned level);
|
||||
|
||||
/**
|
||||
* check assumptions with a background formula
|
||||
*/
|
||||
lbool check_assumptions(const expr_ref_vector & hard,
|
||||
expr_ref_vector & soft,
|
||||
unsigned num_bg = 0,
|
||||
expr * const *bg = NULL,
|
||||
unsigned solver_id = 0);
|
||||
|
||||
void collect_statistics(statistics& st) const;
|
||||
void reset_statistics();
|
||||
|
||||
class scoped_level {
|
||||
bool& m_lev;
|
||||
public:
|
||||
scoped_level(prop_solver& ps, unsigned lvl): m_lev(ps.m_in_level)
|
||||
{
|
||||
SASSERT(!m_lev);
|
||||
m_lev = true;
|
||||
ps.m_current_level = lvl;
|
||||
}
|
||||
~scoped_level() { m_lev = false; }
|
||||
};
|
||||
|
||||
class scoped_subset_core {
|
||||
prop_solver &m_ps;
|
||||
bool m_subset_based_core;
|
||||
|
||||
public:
|
||||
scoped_subset_core(prop_solver &ps, bool subset_core) :
|
||||
m_ps(ps), m_subset_based_core(ps.m_subset_based_core)
|
||||
{m_ps.set_subset_based_core(subset_core);}
|
||||
|
||||
~scoped_subset_core()
|
||||
{m_ps.set_subset_based_core(m_subset_based_core);}
|
||||
};
|
||||
|
||||
class scoped_delta_level : public scoped_level {
|
||||
bool &m_delta;
|
||||
public:
|
||||
scoped_delta_level(prop_solver &ps, unsigned lvl) :
|
||||
scoped_level(ps, lvl), m_delta(ps.m_delta_level) {m_delta = true;}
|
||||
~scoped_delta_level() {m_delta = false;}
|
||||
};
|
||||
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
#endif
|
2333
src/muz/spacer/spacer_qe_project.cpp
Normal file
2333
src/muz/spacer/spacer_qe_project.cpp
Normal file
File diff suppressed because it is too large
49
src/muz/spacer/spacer_qe_project.h
Normal file
49
src/muz/spacer/spacer_qe_project.h
Normal file
|
@ -0,0 +1,49 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_qe_project.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Model-based projection
|
||||
|
||||
Author:
|
||||
|
||||
Anvesh Komuravelli
|
||||
Arie Gurfinkel (arie)
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#ifndef SPACER_QE_PROJECT_H_
|
||||
#define SPACER_QE_PROJECT_H_
|
||||
|
||||
#include "model.h"
|
||||
#include "expr_map.h"
|
||||
|
||||
namespace qe {
|
||||
/**
|
||||
Loos-Weispfenning model-based projection for a basic conjunction.
|
||||
Lits is a vector of literals.
|
||||
return vector of variables that could not be projected.
|
||||
*/
|
||||
expr_ref arith_project(model& model, app_ref_vector& vars, expr_ref_vector const& lits);
|
||||
|
||||
void arith_project(model& model, app_ref_vector& vars, expr_ref& fml);
|
||||
|
||||
void arith_project(model& model, app_ref_vector& vars, expr_ref& fml, expr_map& map);
|
||||
|
||||
void array_project_eqs (model& model, app_ref_vector& arr_vars, expr_ref& fml, app_ref_vector& aux_vars);
|
||||
|
||||
void reduce_array_selects (model& mdl, app_ref_vector const& arr_vars, expr_ref& fml, bool reduce_all_selects = false);
|
||||
|
||||
void reduce_array_selects (model& mdl, expr_ref& fml);
|
||||
|
||||
void array_project_selects (model& model, app_ref_vector& arr_vars, expr_ref& fml, app_ref_vector& aux_vars);
|
||||
|
||||
void array_project (model& model, app_ref_vector& arr_vars, expr_ref& fml, app_ref_vector& aux_vars, bool reduce_all_selects = false);
|
||||
};
|
||||
|
||||
#endif
|
79
src/muz/spacer/spacer_smt_context_manager.cpp
Normal file
79
src/muz/spacer/spacer_smt_context_manager.cpp
Normal file
|
@ -0,0 +1,79 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_smt_context_manager.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
Manager of smt contexts
|
||||
|
||||
Author:
|
||||
|
||||
Nikolaj Bjorner (nbjorner) 2011-11-26.
|
||||
Arie Gurfinkel
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include "spacer_smt_context_manager.h"
|
||||
#include "has_free_vars.h"
|
||||
#include "ast_pp.h"
|
||||
#include "ast_smt_pp.h"
|
||||
#include <sstream>
|
||||
#include "smt_params.h"
|
||||
|
||||
#include "ast_pp_util.h"
|
||||
#include "smt_context.h"
|
||||
#include "spacer_util.h"
|
||||
namespace spacer {
|
||||
|
||||
|
||||
|
||||
|
||||
smt_context_manager::smt_context_manager(ast_manager &m,
|
||||
unsigned max_num_contexts,
|
||||
const params_ref &p) :
|
||||
m_fparams(p),
|
||||
m(m),
|
||||
m_max_num_contexts(max_num_contexts),
|
||||
m_num_contexts(0) { m_stats.reset();}
|
||||
|
||||
|
||||
smt_context_manager::~smt_context_manager()
|
||||
{
|
||||
std::for_each(m_solvers.begin(), m_solvers.end(),
|
||||
delete_proc<spacer::virtual_solver_factory>());
|
||||
}
|
||||
|
||||
virtual_solver* smt_context_manager::mk_fresh()
|
||||
{
|
||||
++m_num_contexts;
|
||||
virtual_solver_factory *solver_factory = 0;
|
||||
|
||||
if (m_max_num_contexts == 0 || m_solvers.size() < m_max_num_contexts) {
|
||||
m_solvers.push_back(alloc(spacer::virtual_solver_factory, m, m_fparams));
|
||||
solver_factory = m_solvers.back();
|
||||
} else
|
||||
{ solver_factory = m_solvers[(m_num_contexts - 1) % m_max_num_contexts]; }
|
||||
|
||||
return solver_factory->mk_solver();
|
||||
}
|
||||
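A hedged note on the allocation policy of mk_fresh above: up to max_num_contexts underlying solver factories are created, after which virtual solvers are served from the existing factories round-robin.

// Editorial sketch; with max_num_contexts == 2:
//   mk_fresh()  // m_num_contexts = 1 -> new factory #0
//   mk_fresh()  // m_num_contexts = 2 -> new factory #1
//   mk_fresh()  // m_num_contexts = 3 -> reuse factory (3 - 1) % 2 == 0
//   mk_fresh()  // m_num_contexts = 4 -> reuse factory (4 - 1) % 2 == 1
// With max_num_contexts == 0 every call creates a fresh factory.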
|
||||
void smt_context_manager::collect_statistics(statistics& st) const
|
||||
{
|
||||
for (unsigned i = 0; i < m_solvers.size(); ++i) {
|
||||
m_solvers[i]->collect_statistics(st);
|
||||
}
|
||||
}
|
||||
|
||||
void smt_context_manager::reset_statistics()
|
||||
{
|
||||
for (unsigned i = 0; i < m_solvers.size(); ++i) {
|
||||
m_solvers[i]->reset_statistics();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
};
|
68
src/muz/spacer/spacer_smt_context_manager.h
Normal file
68
src/muz/spacer/spacer_smt_context_manager.h
Normal file
|
@ -0,0 +1,68 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_smt_context_manager.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Manager of smt contexts
|
||||
|
||||
Author:
|
||||
|
||||
Nikolaj Bjorner (nbjorner) 2011-11-26.
|
||||
Arie Gurfinkel
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_SMT_CONTEXT_MANAGER_H_
|
||||
#define _SPACER_SMT_CONTEXT_MANAGER_H_
|
||||
|
||||
#include "smt_kernel.h"
|
||||
#include "func_decl_dependencies.h"
|
||||
#include "dl_util.h"
|
||||
#include "spacer_virtual_solver.h"
|
||||
#include "stopwatch.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
class smt_context_manager {
|
||||
|
||||
struct stats {
|
||||
unsigned m_num_smt_checks;
|
||||
unsigned m_num_sat_smt_checks;
|
||||
stats() { reset(); }
|
||||
void reset() { memset(this, 0, sizeof(*this)); }
|
||||
};
|
||||
|
||||
smt_params m_fparams;
|
||||
ast_manager& m;
|
||||
unsigned m_max_num_contexts;
|
||||
ptr_vector<virtual_solver_factory> m_solvers;
|
||||
unsigned m_num_contexts;
|
||||
|
||||
|
||||
stats m_stats;
|
||||
stopwatch m_check_watch;
|
||||
stopwatch m_check_sat_watch;
|
||||
|
||||
public:
|
||||
smt_context_manager(ast_manager& m, unsigned max_num_contexts = 1,
|
||||
const params_ref &p = params_ref::get_empty());
|
||||
|
||||
~smt_context_manager();
|
||||
virtual_solver* mk_fresh();
|
||||
|
||||
void collect_statistics(statistics& st) const;
|
||||
void reset_statistics();
|
||||
|
||||
void updt_params(params_ref const &p) { m_fparams.updt_params(p); }
|
||||
smt_params& fparams() {return m_fparams;}
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
#endif
|
608
src/muz/spacer/spacer_sym_mux.cpp
Normal file
608
src/muz/spacer/spacer_sym_mux.cpp
Normal file
|
@ -0,0 +1,608 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
sym_mux.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
A symbol multiplexer that helps with having multiple versions of each of a set of symbols.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-9-8.
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#include <sstream>
|
||||
#include "ast_pp.h"
|
||||
#include "for_each_expr.h"
|
||||
#include "model.h"
|
||||
#include "rewriter.h"
|
||||
#include "rewriter_def.h"
|
||||
#include "spacer_util.h"
|
||||
#include "spacer_sym_mux.h"
|
||||
|
||||
using namespace spacer;
|
||||
|
||||
sym_mux::sym_mux(ast_manager & m, const std::vector<std::string> & suffixes)
|
||||
: m(m), m_ref_holder(m), m_next_sym_suffix_idx(0), m_suffixes(suffixes)
|
||||
{
|
||||
unsigned suf_sz = m_suffixes.size();
|
||||
for (unsigned i = 0; i < suf_sz; ++i) {
|
||||
symbol suff_sym = symbol(m_suffixes[i].c_str());
|
||||
m_used_suffixes.insert(suff_sym);
|
||||
}
|
||||
}
|
||||
|
||||
std::string sym_mux::get_suffix(unsigned i) const
|
||||
{
|
||||
while (m_suffixes.size() <= i) {
|
||||
std::string new_suffix;
|
||||
symbol new_syffix_sym;
|
||||
do {
|
||||
std::stringstream stm;
|
||||
stm << '_' << m_next_sym_suffix_idx;
|
||||
m_next_sym_suffix_idx++;
|
||||
new_suffix = stm.str();
|
||||
new_syffix_sym = symbol(new_suffix.c_str());
|
||||
} while (m_used_suffixes.contains(new_syffix_sym));
|
||||
m_used_suffixes.insert(new_syffix_sym);
|
||||
m_suffixes.push_back(new_suffix);
|
||||
}
|
||||
return m_suffixes[i];
|
||||
}
|
||||
|
||||
void sym_mux::create_tuple(func_decl* prefix, unsigned arity, sort * const * domain, sort * range,
|
||||
unsigned tuple_length, decl_vector & tuple)
|
||||
{
|
||||
SASSERT(tuple_length > 0);
|
||||
while (tuple.size() < tuple_length) {
|
||||
tuple.push_back(0);
|
||||
}
|
||||
SASSERT(tuple.size() == tuple_length);
|
||||
std::string pre = prefix->get_name().str();
|
||||
for (unsigned i = 0; i < tuple_length; i++) {
|
||||
|
||||
if (tuple[i] != 0) {
|
||||
SASSERT(tuple[i]->get_arity() == arity);
|
||||
SASSERT(tuple[i]->get_range() == range);
|
||||
//domain should match as well, but we won't bother checking an array equality
|
||||
} else {
|
||||
std::string name = pre + get_suffix(i);
|
||||
tuple[i] = m.mk_func_decl(symbol(name.c_str()), arity, domain, range);
|
||||
}
|
||||
m_ref_holder.push_back(tuple[i]);
|
||||
m_sym2idx.insert(tuple[i], i);
|
||||
m_sym2prim.insert(tuple[i], tuple[0]);
|
||||
}
|
||||
|
||||
m_prim2all.insert(tuple[0], tuple);
|
||||
m_prefix2prim.insert(prefix, tuple[0]);
|
||||
m_prim2prefix.insert(tuple[0], prefix);
|
||||
m_prim_preds.push_back(tuple[0]);
|
||||
m_ref_holder.push_back(prefix);
|
||||
}
|
||||
|
||||
void sym_mux::ensure_tuple_size(func_decl * prim, unsigned sz) const
|
||||
{
|
||||
SASSERT(m_prim2all.contains(prim));
|
||||
decl_vector& tuple = m_prim2all.find_core(prim)->get_data().m_value;
|
||||
SASSERT(tuple[0] == prim);
|
||||
|
||||
if (sz <= tuple.size()) { return; }
|
||||
|
||||
func_decl * prefix;
|
||||
TRUSTME(m_prim2prefix.find(prim, prefix));
|
||||
std::string prefix_name = prefix->get_name().bare_str();
|
||||
for (unsigned i = tuple.size(); i < sz; ++i) {
|
||||
std::string name = prefix_name + get_suffix(i);
|
||||
func_decl * new_sym = m.mk_func_decl(symbol(name.c_str()), prefix->get_arity(),
|
||||
prefix->get_domain(), prefix->get_range());
|
||||
|
||||
tuple.push_back(new_sym);
|
||||
m_ref_holder.push_back(new_sym);
|
||||
m_sym2idx.insert(new_sym, i);
|
||||
m_sym2prim.insert(new_sym, prim);
|
||||
}
|
||||
}
|
||||
|
||||
func_decl * sym_mux::conv(func_decl * sym, unsigned src_idx, unsigned tgt_idx) const
|
||||
{
|
||||
if (src_idx == tgt_idx) { return sym; }
|
||||
func_decl * prim = (src_idx == 0) ? sym : get_primary(sym);
|
||||
if (tgt_idx > src_idx) {
|
||||
ensure_tuple_size(prim, tgt_idx + 1);
|
||||
}
|
||||
decl_vector & sym_vect = m_prim2all.find_core(prim)->get_data().m_value;
|
||||
SASSERT(sym_vect[src_idx] == sym);
|
||||
return sym_vect[tgt_idx];
|
||||
}
|
||||
|
||||
|
||||
func_decl * sym_mux::get_or_create_symbol_by_prefix(func_decl* prefix, unsigned idx,
|
||||
unsigned arity, sort * const * domain, sort * range)
|
||||
{
|
||||
func_decl * prim = try_get_primary_by_prefix(prefix);
|
||||
if (prim) {
|
||||
SASSERT(prim->get_arity() == arity);
|
||||
SASSERT(prim->get_range() == range);
|
||||
//domain should match as well, but we won't bother checking an array equality
|
||||
|
||||
return conv(prim, 0, idx);
|
||||
}
|
||||
|
||||
decl_vector syms;
|
||||
create_tuple(prefix, arity, domain, range, idx + 1, syms);
|
||||
return syms[idx];
|
||||
}
|
||||
|
||||
bool sym_mux::is_muxed_lit(expr * e, unsigned idx) const
|
||||
{
|
||||
if (!is_app(e)) { return false; }
|
||||
app * a = to_app(e);
|
||||
if (m.is_not(a) && is_app(a->get_arg(0))) {
|
||||
a = to_app(a->get_arg(0));
|
||||
}
|
||||
return is_muxed(a->get_decl());
|
||||
}
|
||||
|
||||
|
||||
struct sym_mux::formula_checker {
|
||||
formula_checker(const sym_mux & parent, bool all, unsigned idx) :
|
||||
m_parent(parent), m_all(all), m_idx(idx),
|
||||
m_found_what_needed(false)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if (m_found_what_needed || !is_app(e)) { return; }
|
||||
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned sym_idx;
|
||||
if (!m_parent.try_get_index(sym, sym_idx)) { return; }
|
||||
|
||||
bool have_idx = sym_idx == m_idx;
|
||||
|
||||
if (m_all ? (!have_idx) : have_idx) {
|
||||
m_found_what_needed = true;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
bool all_have_idx() const
|
||||
{
|
||||
SASSERT(m_all); //we were looking for the queried property
|
||||
return !m_found_what_needed;
|
||||
}
|
||||
|
||||
bool some_with_idx() const
|
||||
{
|
||||
SASSERT(!m_all); //we were looking for the queried property
|
||||
return m_found_what_needed;
|
||||
}
|
||||
|
||||
private:
|
||||
const sym_mux & m_parent;
|
||||
bool m_all;
|
||||
unsigned m_idx;
|
||||
|
||||
/**
|
||||
If we check whether all muxed symbols are of given index, we look for
|
||||
counter-examples, checking whether form contains a muxed symbol of an index,
|
||||
we look for symbol of index m_idx.
|
||||
*/
|
||||
bool m_found_what_needed;
|
||||
};
|
||||
|
||||
bool sym_mux::contains(expr * e, unsigned idx) const
|
||||
{
|
||||
formula_checker chck(*this, false, idx);
|
||||
for_each_expr(chck, m_visited, e);
|
||||
m_visited.reset();
|
||||
return chck.some_with_idx();
|
||||
}
|
||||
|
||||
bool sym_mux::is_homogenous_formula(expr * e, unsigned idx) const
|
||||
{
|
||||
formula_checker chck(*this, true, idx);
|
||||
for_each_expr(chck, m_visited, e);
|
||||
m_visited.reset();
|
||||
return chck.all_have_idx();
|
||||
}
|
||||
|
||||
bool sym_mux::is_homogenous(const expr_ref_vector & vect, unsigned idx) const
|
||||
{
|
||||
expr * const * begin = vect.c_ptr();
|
||||
expr * const * end = begin + vect.size();
|
||||
for (expr * const * it = begin; it != end; it++) {
|
||||
if (!is_homogenous_formula(*it, idx)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
class sym_mux::index_collector {
|
||||
sym_mux const& m_parent;
|
||||
svector<bool> m_indices;
|
||||
public:
|
||||
index_collector(sym_mux const& s):
|
||||
m_parent(s) {}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if (is_app(e)) {
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned idx;
|
||||
if (m_parent.try_get_index(sym, idx)) {
|
||||
SASSERT(idx > 0);
|
||||
--idx;
|
||||
if (m_indices.size() <= idx) {
|
||||
m_indices.resize(idx + 1, false);
|
||||
}
|
||||
m_indices[idx] = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void extract(unsigned_vector& indices)
|
||||
{
|
||||
for (unsigned i = 0; i < m_indices.size(); ++i) {
|
||||
if (m_indices[i]) {
|
||||
indices.push_back(i);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
void sym_mux::collect_indices(expr* e, unsigned_vector& indices) const
|
||||
{
|
||||
indices.reset();
|
||||
index_collector collector(*this);
|
||||
for_each_expr(collector, m_visited, e);
|
||||
m_visited.reset();
|
||||
collector.extract(indices);
|
||||
}
|
||||
|
||||
class sym_mux::variable_collector {
|
||||
sym_mux const& m_parent;
|
||||
vector<ptr_vector<app> >& m_vars;
|
||||
public:
|
||||
variable_collector(sym_mux const& s, vector<ptr_vector<app> >& vars):
|
||||
m_parent(s), m_vars(vars) {}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if (is_app(e)) {
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned idx;
|
||||
if (m_parent.try_get_index(sym, idx)) {
|
||||
SASSERT(idx > 0);
|
||||
--idx;
|
||||
if (m_vars.size() <= idx) {
|
||||
m_vars.resize(idx + 1, ptr_vector<app>());
|
||||
}
|
||||
m_vars[idx].push_back(to_app(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
void sym_mux::collect_variables(expr* e, vector<ptr_vector<app> >& vars) const
|
||||
{
|
||||
vars.reset();
|
||||
variable_collector collector(*this, vars);
|
||||
for_each_expr(collector, m_visited, e);
|
||||
m_visited.reset();
|
||||
}
|
||||
|
||||
class sym_mux::hmg_checker {
|
||||
const sym_mux & m_parent;
|
||||
|
||||
bool m_found_idx;
|
||||
unsigned m_idx;
|
||||
bool m_multiple_indexes;
|
||||
|
||||
public:
|
||||
hmg_checker(const sym_mux & parent) :
|
||||
m_parent(parent), m_found_idx(false), m_multiple_indexes(false)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if (m_multiple_indexes || !is_app(e)) { return; }
|
||||
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
unsigned sym_idx;
|
||||
if (!m_parent.try_get_index(sym, sym_idx)) { return; }
|
||||
|
||||
if (!m_found_idx) {
|
||||
m_found_idx = true;
|
||||
m_idx = sym_idx;
|
||||
return;
|
||||
}
|
||||
if (m_idx == sym_idx) { return; }
|
||||
m_multiple_indexes = true;
|
||||
}
|
||||
|
||||
bool has_multiple_indexes() const
|
||||
{
|
||||
return m_multiple_indexes;
|
||||
}
|
||||
};
|
||||
|
||||
bool sym_mux::is_homogenous_formula(expr * e) const
|
||||
{
|
||||
hmg_checker chck(*this);
|
||||
for_each_expr(chck, m_visited, e);
|
||||
m_visited.reset();
|
||||
return !chck.has_multiple_indexes();
|
||||
}
|
||||
|
||||
|
||||
struct sym_mux::conv_rewriter_cfg : public default_rewriter_cfg {
|
||||
private:
|
||||
ast_manager & m;
|
||||
const sym_mux & m_parent;
|
||||
unsigned m_from_idx;
|
||||
unsigned m_to_idx;
|
||||
bool m_homogenous;
|
||||
public:
|
||||
conv_rewriter_cfg(const sym_mux & parent, unsigned from_idx, unsigned to_idx, bool homogenous)
|
||||
: m(parent.get_manager()),
|
||||
m_parent(parent),
|
||||
m_from_idx(from_idx),
|
||||
m_to_idx(to_idx),
|
||||
m_homogenous(homogenous) {}
|
||||
|
||||
bool get_subst(expr * s, expr * & t, proof * & t_pr)
|
||||
{
|
||||
if (!is_app(s)) { return false; }
|
||||
app * a = to_app(s);
|
||||
func_decl * sym = a->get_decl();
|
||||
if (!m_parent.has_index(sym, m_from_idx)) {
|
||||
SASSERT(!m_homogenous || !m_parent.is_muxed(sym));
|
||||
return false;
|
||||
}
|
||||
func_decl * tgt = m_parent.conv(sym, m_from_idx, m_to_idx);
|
||||
|
||||
t = m.mk_app(tgt, a->get_args());
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
void sym_mux::conv_formula(expr * f, unsigned src_idx, unsigned tgt_idx, expr_ref & res, bool homogenous) const
|
||||
{
|
||||
if (src_idx == tgt_idx) {
|
||||
res = f;
|
||||
return;
|
||||
}
|
||||
conv_rewriter_cfg r_cfg(*this, src_idx, tgt_idx, homogenous);
|
||||
rewriter_tpl<conv_rewriter_cfg> rwr(m, false, r_cfg);
|
||||
rwr(f, res);
|
||||
}
|
||||
|
||||
struct sym_mux::shifting_rewriter_cfg : public default_rewriter_cfg {
|
||||
private:
|
||||
ast_manager & m;
|
||||
const sym_mux & m_parent;
|
||||
int m_shift;
|
||||
public:
|
||||
shifting_rewriter_cfg(const sym_mux & parent, int shift)
|
||||
: m(parent.get_manager()),
|
||||
m_parent(parent),
|
||||
m_shift(shift) {}
|
||||
|
||||
bool get_subst(expr * s, expr * & t, proof * & t_pr)
|
||||
{
|
||||
if (!is_app(s)) { return false; }
|
||||
app * a = to_app(s);
|
||||
func_decl * sym = a->get_decl();
|
||||
|
||||
unsigned idx;
|
||||
if (!m_parent.try_get_index(sym, idx)) {
|
||||
return false;
|
||||
}
|
||||
SASSERT(static_cast<int>(idx) + m_shift >= 0);
|
||||
func_decl * tgt = m_parent.conv(sym, idx, idx + m_shift);
|
||||
t = m.mk_app(tgt, a->get_args());
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
void sym_mux::shift_formula(expr * f, int dist, expr_ref & res) const
|
||||
{
|
||||
if (dist == 0) {
|
||||
res = f;
|
||||
return;
|
||||
}
|
||||
shifting_rewriter_cfg r_cfg(*this, dist);
|
||||
rewriter_tpl<shifting_rewriter_cfg> rwr(m, false, r_cfg);
|
||||
rwr(f, res);
|
||||
}
|
||||
|
||||
void sym_mux::conv_formula_vector(const expr_ref_vector & vect, unsigned src_idx, unsigned tgt_idx,
|
||||
expr_ref_vector & res) const
|
||||
{
|
||||
res.reset();
|
||||
expr * const * begin = vect.c_ptr();
|
||||
expr * const * end = begin + vect.size();
|
||||
for (expr * const * it = begin; it != end; it++) {
|
||||
expr_ref converted(m);
|
||||
conv_formula(*it, src_idx, tgt_idx, converted);
|
||||
res.push_back(converted);
|
||||
}
|
||||
}
|
||||
|
||||
void sym_mux::filter_idx(expr_ref_vector & vect, unsigned idx) const
|
||||
{
|
||||
unsigned i = 0;
|
||||
while (i < vect.size()) {
|
||||
expr* e = vect[i].get();
|
||||
if (contains(e, idx) && is_homogenous_formula(e, idx)) {
|
||||
i++;
|
||||
} else {
|
||||
//we don't allow mixing states inside vector elements
|
||||
SASSERT(!contains(e, idx));
|
||||
vect[i] = vect.back();
|
||||
vect.pop_back();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void sym_mux::partition_o_idx(
|
||||
expr_ref_vector const& lits,
|
||||
expr_ref_vector& o_lits,
|
||||
expr_ref_vector& other, unsigned idx) const
|
||||
{
|
||||
|
||||
for (unsigned i = 0; i < lits.size(); ++i) {
|
||||
if (contains(lits[i], idx) && is_homogenous_formula(lits[i], idx)) {
|
||||
o_lits.push_back(lits[i]);
|
||||
} else {
|
||||
other.push_back(lits[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
class sym_mux::nonmodel_sym_checker {
|
||||
const sym_mux & m_parent;
|
||||
|
||||
bool m_found;
|
||||
public:
|
||||
nonmodel_sym_checker(const sym_mux & parent) :
|
||||
m_parent(parent), m_found(false)
|
||||
{
|
||||
}
|
||||
|
||||
void operator()(expr * e)
|
||||
{
|
||||
if (m_found || !is_app(e)) { return; }
|
||||
|
||||
func_decl * sym = to_app(e)->get_decl();
|
||||
|
||||
if (m_parent.is_non_model_sym(sym)) {
|
||||
m_found = true;
|
||||
}
|
||||
}
|
||||
|
||||
bool found() const
|
||||
{
|
||||
return m_found;
|
||||
}
|
||||
};
|
||||
|
||||
bool sym_mux::has_nonmodel_symbol(expr * e) const
|
||||
{
|
||||
nonmodel_sym_checker chck(*this);
|
||||
for_each_expr(chck, e);
|
||||
return chck.found();
|
||||
}
|
||||
|
||||
void sym_mux::filter_non_model_lits(expr_ref_vector & vect) const
|
||||
{
|
||||
unsigned i = 0;
|
||||
while (i < vect.size()) {
|
||||
if (!has_nonmodel_symbol(vect[i].get())) {
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
vect[i] = vect.back();
|
||||
vect.pop_back();
|
||||
}
|
||||
}
|
||||
|
||||
class sym_mux::decl_idx_comparator {
|
||||
const sym_mux & m_parent;
|
||||
public:
|
||||
decl_idx_comparator(const sym_mux & parent)
|
||||
: m_parent(parent)
|
||||
{ }
|
||||
|
||||
bool operator()(func_decl * sym1, func_decl * sym2)
|
||||
{
|
||||
unsigned idx1, idx2;
|
||||
if (!m_parent.try_get_index(sym1, idx1)) { idx1 = UINT_MAX; }
|
||||
if (!m_parent.try_get_index(sym2, idx2)) { idx2 = UINT_MAX; }
|
||||
|
||||
if (idx1 != idx2) { return idx1 < idx2; }
|
||||
return lt(sym1->get_name(), sym2->get_name());
|
||||
}
|
||||
};
|
||||
|
||||
std::string sym_mux::pp_model(const model_core & mdl) const
|
||||
{
|
||||
decl_vector consts;
|
||||
unsigned sz = mdl.get_num_constants();
|
||||
for (unsigned i = 0; i < sz; i++) {
|
||||
func_decl * d = mdl.get_constant(i);
|
||||
consts.push_back(d);
|
||||
}
|
||||
|
||||
std::sort(consts.begin(), consts.end(), decl_idx_comparator(*this));
|
||||
|
||||
std::stringstream res;
|
||||
|
||||
decl_vector::iterator end = consts.end();
|
||||
for (decl_vector::iterator it = consts.begin(); it != end; it++) {
|
||||
func_decl * d = *it;
|
||||
std::string name = d->get_name().str();
|
||||
const char * arrow = " -> ";
|
||||
res << name << arrow;
|
||||
unsigned indent = static_cast<unsigned>(name.length() + strlen(arrow));
|
||||
res << mk_pp(mdl.get_const_interp(d), m, indent) << "\n";
|
||||
|
||||
if (it + 1 != end) {
|
||||
unsigned idx1, idx2;
|
||||
if (!try_get_index(*it, idx1)) { idx1 = UINT_MAX; }
|
||||
if (!try_get_index(*(it + 1), idx2)) { idx2 = UINT_MAX; }
|
||||
if (idx1 != idx2) { res << "\n"; }
|
||||
}
|
||||
}
|
||||
return res.str();
|
||||
}
|
||||
|
||||
|
||||
#if 0
|
||||
|
||||
class sym_mux::index_renamer_cfg : public default_rewriter_cfg {
|
||||
const sym_mux & m_parent;
|
||||
unsigned m_idx;
|
||||
|
||||
public:
|
||||
index_renamer_cfg(const sym_mux & p, unsigned idx) : m_parent(p), m_idx(idx) {}
|
||||
|
||||
bool get_subst(expr * s, expr * & t, proof * & t_pr)
|
||||
{
|
||||
if (!is_app(s)) { return false; }
|
||||
app * a = to_app(s);
|
||||
if (a->get_family_id() != null_family_id) {
|
||||
return false;
|
||||
}
|
||||
func_decl * sym = a->get_decl();
|
||||
unsigned idx;
|
||||
if (!m_parent.try_get_index(sym, idx)) {
|
||||
return false;
|
||||
}
|
||||
if (m_idx == idx) {
|
||||
return false;
|
||||
}
|
||||
ast_manager& m = m_parent.get_manager();
|
||||
symbol name = symbol((sym->get_name().str() + "!").c_str());
|
||||
func_decl * tgt = m.mk_func_decl(name, sym->get_arity(), sym->get_domain(), sym->get_range());
|
||||
t = m.mk_app(tgt, a->get_num_args(), a->get_args());
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
#endif
|
256
src/muz/spacer/spacer_sym_mux.h
Normal file
@ -0,0 +1,256 @@
/*++
Copyright (c) 2011 Microsoft Corporation

Module Name:

    sym_mux.h

Abstract:

    A symbol multiplexer that helps with having multiple versions of each of a set of symbols.

Author:

    Krystof Hoder (t-khoder) 2011-9-8.

Revision History:

--*/

#ifndef _SYM_MUX_H_
#define _SYM_MUX_H_

#include "ast.h"
#include "map.h"
#include "vector.h"
#include <vector>

class model_core;

namespace spacer {
class sym_mux {
public:
    typedef ptr_vector<app> app_vector;
    typedef ptr_vector<func_decl> decl_vector;
private:
    typedef obj_map<func_decl, unsigned> sym2u;
    typedef obj_map<func_decl, decl_vector> sym2dv;
    typedef obj_map<func_decl, func_decl *> sym2sym;
    typedef obj_map<func_decl, func_decl *> sym2pred;
    typedef hashtable<symbol, symbol_hash_proc, symbol_eq_proc> symbols;

    ast_manager & m;
    mutable ast_ref_vector m_ref_holder;
    mutable expr_mark m_visited;

    mutable unsigned m_next_sym_suffix_idx;
    mutable symbols m_used_suffixes;
    /** Here we have default suffixes for each of the variants */
    mutable std::vector<std::string> m_suffixes;


    /**
    Primary symbol is the 0-th variant. This member maps from primary symbol
    to vector of all its variants (including the primary variant).
    */
    sym2dv m_prim2all;

    /**
    For each symbol contains its variant index
    */
    mutable sym2u m_sym2idx;
    /**
    For each symbol contains its primary variant
    */
    mutable sym2sym m_sym2prim;

    /**
    Maps prefixes passed to the create_tuple to
    the primary symbol created from it.
    */
    sym2pred m_prefix2prim;

    /**
    Maps primary symbols to prefixes that were used to create them.
    */
    sym2sym m_prim2prefix;

    decl_vector m_prim_preds;

    obj_hashtable<func_decl> m_non_model_syms;

    struct formula_checker;
    struct conv_rewriter_cfg;
    struct shifting_rewriter_cfg;
    class decl_idx_comparator;
    class hmg_checker;
    class nonmodel_sym_checker;
    class index_renamer_cfg;
    class index_collector;
    class variable_collector;

    std::string get_suffix(unsigned i) const;
    void ensure_tuple_size(func_decl * prim, unsigned sz) const;

    expr_ref isolate_o_idx(expr* e, unsigned idx) const;
public:
    sym_mux(ast_manager & m, const std::vector<std::string> & suffixes);

    ast_manager & get_manager() const { return m; }

    bool is_muxed(func_decl * sym) const { return m_sym2idx.contains(sym); }

    bool try_get_index(func_decl * sym, unsigned & idx) const
    {
        return m_sym2idx.find(sym, idx);
    }

    bool has_index(func_decl * sym, unsigned idx) const
    {
        unsigned actual_idx;
        return try_get_index(sym, actual_idx) && idx == actual_idx;
    }

    /** Return primary symbol. sym must be muxed. */
    func_decl * get_primary(func_decl * sym) const
    {
        func_decl * prim;
        TRUSTME(m_sym2prim.find(sym, prim));
        return prim;
    }

    /**
    Return primary symbol created from prefix, or 0 if the prefix was never used.
    */
    func_decl * try_get_primary_by_prefix(func_decl* prefix) const
    {
        func_decl * res;
        if(!m_prefix2prim.find(prefix, res)) {
            return 0;
        }
        return res;
    }

    /**
    Return symbol created from prefix, or 0 if the prefix was never used.
    */
    func_decl * try_get_by_prefix(func_decl* prefix, unsigned idx) const
    {
        func_decl * prim = try_get_primary_by_prefix(prefix);
        if(!prim) {
            return 0;
        }
        return conv(prim, 0, idx);
    }

    /**
    Marks symbol as non-model, which means it will not appear in models collected by
    the get_muxed_cube_from_model function.
    This is to take care of auxiliary symbols introduced by the disjunction relations
    to relativize lemmas coming from disjuncts.
    */
    void mark_as_non_model(func_decl * sym)
    {
        SASSERT(is_muxed(sym));
        m_non_model_syms.insert(get_primary(sym));
    }

    func_decl * get_or_create_symbol_by_prefix(func_decl* prefix, unsigned idx,
            unsigned arity, sort * const * domain, sort * range);



    bool is_muxed_lit(expr * e, unsigned idx) const;

    bool is_non_model_sym(func_decl * s) const
    {
        return is_muxed(s) && m_non_model_syms.contains(get_primary(s));
    }

    /**
    Create a multiplexed tuple of propositional constants.
    Symbols may be supplied in the tuple vector;
    those beyond the size of the array and those with corresponding positions
    assigned to zero will be created using prefix.
    Tuple length must be at least one.
    */
    void create_tuple(func_decl* prefix, unsigned arity, sort * const * domain, sort * range,
                      unsigned tuple_length, decl_vector & tuple);

    /**
    Return true if the only multiplexed symbols which e contains are of index idx.
    */
    bool is_homogenous_formula(expr * e, unsigned idx) const;
    bool is_homogenous(const expr_ref_vector & vect, unsigned idx) const;

    /**
    Return true if all multiplexed symbols which e contains are of one index.
    */
    bool is_homogenous_formula(expr * e) const;

    /**
    Return true if expression e contains a muxed symbol of index idx.
    */
    bool contains(expr * e, unsigned idx) const;

    /**
    Collect indices used in expression.
    */
    void collect_indices(expr* e, unsigned_vector& indices) const;

    /**
    Collect used variables of each index.
    */
    void collect_variables(expr* e, vector<ptr_vector<app> >& vars) const;

    /**
    Convert symbol sym, which has to be of the src_idx variant, into variant tgt_idx.
    */
    func_decl * conv(func_decl * sym, unsigned src_idx, unsigned tgt_idx) const;


    /**
    Convert src_idx symbols in formula f into the tgt_idx variant.
    If homogenous is true, the formula cannot contain symbols of other variants.
    */
    void conv_formula(expr * f, unsigned src_idx, unsigned tgt_idx, expr_ref & res, bool homogenous = true) const;
    void conv_formula_vector(const expr_ref_vector & vect, unsigned src_idx, unsigned tgt_idx,
                             expr_ref_vector & res) const;

    /**
    Shifts the muxed symbols in f by dist. Dist can be negative, but it should never shift
    a symbol index to a negative value.
    */
    void shift_formula(expr * f, int dist, expr_ref & res) const;

    /**
    Remove from vect literals (atoms or negations of atoms) of symbols
    that contain multiplexed symbols with indexes other than idx.

    Each of the literals can contain only symbols multiplexed with one index
    (this trivially holds if the literals are propositional).

    Order of elements in vect may be modified by this function.
    */
    void filter_idx(expr_ref_vector & vect, unsigned idx) const;

    /**
    Partition literals into o_literals and others.
    */
    void partition_o_idx(expr_ref_vector const& lits,
                         expr_ref_vector& o_lits,
                         expr_ref_vector& other, unsigned idx) const;

    bool has_nonmodel_symbol(expr * e) const;
    void filter_non_model_lits(expr_ref_vector & vect) const;

    func_decl * const * begin_prim_preds() const { return m_prim_preds.begin(); }
    func_decl * const * end_prim_preds() const { return m_prim_preds.end(); }

    void get_muxed_cube_from_model(const model_core & model, expr_ref_vector & res) const;

    std::string pp_model(const model_core & mdl) const;
};
}

#endif
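Note (not part of the commit): a hypothetical sketch of the sym_mux workflow described by the header above. The suffix strings, the predicate name "Inv", and the zero-arity declaration are illustrative assumptions.

#include "spacer_sym_mux.h"

// Hedged sketch: create two variants of a nullary predicate and rename a
// formula from variant 0 (e.g. current state) to variant 1 (e.g. next state).
void sym_mux_example(ast_manager &m) {
    std::vector<std::string> suffixes;
    suffixes.push_back("_n");                       // suffix of the primary (0-th) variant
    suffixes.push_back("_0");                       // suffix of variant 1
    spacer::sym_mux mux(m, suffixes);

    sort *b = m.mk_bool_sort();
    func_decl *prefix = m.mk_func_decl(symbol("Inv"), 0, (sort *const *)0, b);

    spacer::sym_mux::decl_vector tuple;
    mux.create_tuple(prefix, 0, 0, b, /* tuple_length = */ 2, tuple);  // Inv_n, Inv_0

    expr_ref cur(m.mk_const(tuple[0]), m);
    expr_ref next(m);
    mux.conv_formula(cur, 0, 1, next);              // homogenous renaming 0 -> 1
}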
360
src/muz/spacer/spacer_unsat_core_learner.cpp
Normal file
@ -0,0 +1,360 @@
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_unsat_core_learner.cpp
|
||||
|
||||
Abstract:
|
||||
itp cores
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
||||
|
||||
Revision History:
|
||||
|
||||
|
||||
--*/
|
||||
#include "spacer_unsat_core_learner.h"
|
||||
|
||||
#include "spacer_unsat_core_plugin.h"
|
||||
|
||||
#include "proof_utils.h"
|
||||
#include "for_each_expr.h"
|
||||
#include <unordered_map>
|
||||
namespace spacer
|
||||
{
|
||||
|
||||
#pragma mark - proof iterators
|
||||
|
||||
# pragma mark - main methods
|
||||
unsat_core_learner::~unsat_core_learner()
|
||||
{
|
||||
std::for_each(m_plugins.begin(), m_plugins.end(), delete_proc<unsat_core_plugin>());
|
||||
|
||||
}
|
||||
|
||||
void unsat_core_learner::register_plugin(unsat_core_plugin* plugin)
|
||||
{
|
||||
m_plugins.push_back(plugin);
|
||||
}
|
||||
|
||||
void unsat_core_learner::compute_unsat_core(proof *root, expr_set& asserted_b, expr_ref_vector& unsat_core)
|
||||
{
|
||||
// transform proof in order to get a proof which is better suited for unsat-core-extraction
|
||||
proof_ref pr(root, m);
|
||||
|
||||
spacer::reduce_hypotheses(pr);
|
||||
STRACE("spacer.unsat_core_learner",
|
||||
verbose_stream() << "Reduced proof:\n" << mk_ismt2_pp(pr, m) << "\n";
|
||||
);
|
||||
|
||||
// compute symbols occuring in B
|
||||
collect_symbols_b(asserted_b);
|
||||
|
||||
// traverse proof
|
||||
ProofIteratorPostOrder it(root, m);
|
||||
while (it.hasNext())
|
||||
{
|
||||
proof* currentNode = it.next();
|
||||
|
||||
if (m.get_num_parents(currentNode) == 0)
|
||||
{
|
||||
switch(currentNode->get_decl_kind())
|
||||
{
|
||||
|
||||
case PR_ASSERTED: // currentNode is an axiom
|
||||
{
|
||||
if (asserted_b.contains(m.get_fact(currentNode)))
|
||||
{
|
||||
m_b_mark.mark(currentNode, true);
|
||||
}
|
||||
else
|
||||
{
|
||||
m_a_mark.mark(currentNode, true);
|
||||
}
|
||||
break;
|
||||
}
|
||||
// currentNode is a hypothesis:
|
||||
case PR_HYPOTHESIS:
|
||||
{
|
||||
m_h_mark.mark(currentNode, true);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// collect from parents whether derivation of current node contains A-axioms, B-axioms and hypothesis
|
||||
bool need_to_mark_a = false;
|
||||
bool need_to_mark_b = false;
|
||||
bool need_to_mark_h = false;
|
||||
bool need_to_mark_closed = true;
|
||||
|
||||
for (unsigned i = 0; i < m.get_num_parents(currentNode); ++i)
|
||||
{
|
||||
SASSERT(m.is_proof(currentNode->get_arg(i)));
|
||||
proof* premise = to_app(currentNode->get_arg(i));
|
||||
|
||||
need_to_mark_a = need_to_mark_a || m_a_mark.is_marked(premise);
|
||||
need_to_mark_b = need_to_mark_b || m_b_mark.is_marked(premise);
|
||||
need_to_mark_h = need_to_mark_h || m_h_mark.is_marked(premise);
|
||||
need_to_mark_closed = need_to_mark_closed && (!m_b_mark.is_marked(premise) || m_closed.is_marked(premise));
|
||||
}
|
||||
|
||||
// if current node is application of lemma, we know that all hypothesis are removed
|
||||
if(currentNode->get_decl_kind() == PR_LEMMA)
|
||||
{
|
||||
need_to_mark_h = false;
|
||||
}
|
||||
|
||||
// save results
|
||||
m_a_mark.mark(currentNode, need_to_mark_a);
|
||||
m_b_mark.mark(currentNode, need_to_mark_b);
|
||||
m_h_mark.mark(currentNode, need_to_mark_h);
|
||||
m_closed.mark(currentNode, need_to_mark_closed);
|
||||
}
|
||||
|
||||
// we have now collected all necessary information, so we can visit the node
|
||||
// if the node mixes A-reasoning and B-reasoning and contains non-closed premises
|
||||
if (m_a_mark.is_marked(currentNode) && m_b_mark.is_marked(currentNode) && !m_closed.is_marked(currentNode))
|
||||
{
|
||||
compute_partial_core(currentNode); // then we need to compute a partial core
|
||||
// SASSERT(!(m_a_mark.is_marked(currentNode) && m_b_mark.is_marked(currentNode)) || m_closed.is_marked(currentNode)); TODO: doesn't hold anymore if we do the mincut-thing!
|
||||
}
|
||||
}
|
||||
|
||||
// give plugins chance to finalize their unsat-core-computation
|
||||
finalize();
|
||||
|
||||
// TODO: remove duplicates from unsat core?
|
||||
|
||||
bool debug_proof = false;
|
||||
if(debug_proof)
|
||||
{
|
||||
// print proof for debugging
|
||||
verbose_stream() << "\n\nProof:\n";
|
||||
std::unordered_map<unsigned, unsigned> id_to_small_id;
|
||||
unsigned counter = 0;
|
||||
|
||||
ProofIteratorPostOrder it2(root, m);
|
||||
while (it2.hasNext())
|
||||
{
|
||||
proof* currentNode = it2.next();
|
||||
|
||||
SASSERT(id_to_small_id.find(currentNode->get_id()) == id_to_small_id.end());
|
||||
id_to_small_id.insert(std::make_pair(currentNode->get_id(), counter));
|
||||
|
||||
verbose_stream() << counter << " ";
|
||||
verbose_stream() << "[";
|
||||
if (is_a_marked(currentNode))
|
||||
{
|
||||
verbose_stream() << "a";
|
||||
}
|
||||
if (is_b_marked(currentNode))
|
||||
{
|
||||
verbose_stream() << "b";
|
||||
}
|
||||
if (is_h_marked(currentNode))
|
||||
{
|
||||
verbose_stream() << "h";
|
||||
}
|
||||
if (is_closed(currentNode))
|
||||
{
|
||||
verbose_stream() << "c";
|
||||
}
|
||||
verbose_stream() << "] ";
|
||||
|
||||
if (m.get_num_parents(currentNode) == 0)
|
||||
{
|
||||
switch (currentNode->get_decl_kind())
|
||||
{
|
||||
case PR_ASSERTED:
|
||||
verbose_stream() << "asserted";
|
||||
break;
|
||||
case PR_HYPOTHESIS:
|
||||
verbose_stream() << "hypothesis";
|
||||
break;
|
||||
default:
|
||||
verbose_stream() << "unknown axiom-type";
|
||||
break;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (currentNode->get_decl_kind() == PR_LEMMA)
|
||||
{
|
||||
verbose_stream() << "lemma";
|
||||
}
|
||||
else if (currentNode->get_decl_kind() == PR_TH_LEMMA)
|
||||
{
|
||||
verbose_stream() << "th_lemma";
|
||||
func_decl* d = currentNode->get_decl();
|
||||
symbol sym;
|
||||
if (d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step
|
||||
d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas",
|
||||
d->get_parameter(1).is_symbol(sym) && sym == "farkas")
|
||||
{
|
||||
verbose_stream() << "(farkas)";
|
||||
}
|
||||
else
|
||||
{
|
||||
verbose_stream() << "(other)";
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
verbose_stream() << "step";
|
||||
}
|
||||
verbose_stream() << " from ";
|
||||
for (int i = m.get_num_parents(currentNode) - 1; i >= 0 ; --i)
|
||||
{
|
||||
proof* premise = to_app(currentNode->get_arg(i));
|
||||
unsigned premise_small_id = id_to_small_id[premise->get_id()];
|
||||
if (i > 0)
|
||||
{
|
||||
verbose_stream() << premise_small_id << ", ";
|
||||
}
|
||||
else
|
||||
{
|
||||
verbose_stream() << premise_small_id;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
if (currentNode->get_decl_kind() == PR_TH_LEMMA || (is_a_marked(currentNode) && is_b_marked(currentNode)) || is_h_marked(currentNode) || (!is_a_marked(currentNode) && !is_b_marked(currentNode)))
|
||||
{
|
||||
verbose_stream() << std::endl;
|
||||
}
|
||||
else
|
||||
{
|
||||
verbose_stream() << ": " << mk_pp(m.get_fact(currentNode), m) << std::endl;
|
||||
}
|
||||
++counter;
|
||||
}
|
||||
}
|
||||
// move all lemmas into vector
|
||||
for (expr* const* it = m_unsat_core.begin(); it != m_unsat_core.end(); ++it)
|
||||
{
|
||||
unsat_core.push_back(*it);
|
||||
}
|
||||
}
|
||||
|
||||
void unsat_core_learner::compute_partial_core(proof* step)
|
||||
{
|
||||
for (unsat_core_plugin** it=m_plugins.begin(), **end = m_plugins.end (); it != end && !m_closed.is_marked(step); ++it)
|
||||
{
|
||||
unsat_core_plugin* plugin = *it;
|
||||
plugin->compute_partial_core(step);
|
||||
}
|
||||
}
|
||||
|
||||
void unsat_core_learner::finalize()
|
||||
{
|
||||
for (unsat_core_plugin** it=m_plugins.begin(); it != m_plugins.end(); ++it)
|
||||
{
|
||||
unsat_core_plugin* plugin = *it;
|
||||
plugin->finalize();
|
||||
}
|
||||
}
|
||||
|
||||
#pragma mark - API
|
||||
|
||||
bool unsat_core_learner::is_a_marked(proof* p)
|
||||
{
|
||||
return m_a_mark.is_marked(p);
|
||||
}
|
||||
bool unsat_core_learner::is_b_marked(proof* p)
|
||||
{
|
||||
return m_b_mark.is_marked(p);
|
||||
}
|
||||
bool unsat_core_learner::is_h_marked(proof* p)
|
||||
{
|
||||
return m_h_mark.is_marked(p);
|
||||
}
|
||||
bool unsat_core_learner::is_closed(proof*p)
|
||||
{
|
||||
return m_closed.is_marked(p);
|
||||
}
|
||||
void unsat_core_learner::set_closed(proof* p, bool value)
|
||||
{
|
||||
m_closed.mark(p, value);
|
||||
}
|
||||
|
||||
void unsat_core_learner::add_lemma_to_core(expr* lemma)
|
||||
{
|
||||
m_unsat_core.push_back(lemma);
|
||||
}
|
||||
|
||||
# pragma mark - checking for b_symbols
|
||||
|
||||
class collect_pure_proc {
|
||||
func_decl_set& m_symbs;
|
||||
public:
|
||||
collect_pure_proc(func_decl_set& s):m_symbs(s) {}
|
||||
|
||||
void operator()(app* a) {
|
||||
if (a->get_family_id() == null_family_id) {
|
||||
m_symbs.insert(a->get_decl());
|
||||
}
|
||||
}
|
||||
void operator()(var*) {}
|
||||
void operator()(quantifier*) {}
|
||||
};
|
||||
|
||||
void unsat_core_learner::collect_symbols_b(expr_set axioms_b)
|
||||
{
|
||||
expr_mark visited;
|
||||
collect_pure_proc proc(m_symbols_b);
|
||||
for (expr_set::iterator it = axioms_b.begin(); it != axioms_b.end(); ++it)
|
||||
{
|
||||
for_each_expr(proc, visited, *it);
|
||||
}
|
||||
}
|
||||
|
||||
class is_pure_expr_proc {
|
||||
func_decl_set const& m_symbs;
|
||||
array_util m_au;
|
||||
public:
|
||||
struct non_pure {};
|
||||
|
||||
is_pure_expr_proc(func_decl_set const& s, ast_manager& m):
|
||||
m_symbs(s),
|
||||
m_au (m)
|
||||
{}
|
||||
|
||||
void operator()(app* a) {
|
||||
if (a->get_family_id() == null_family_id) {
|
||||
if (!m_symbs.contains(a->get_decl())) {
|
||||
throw non_pure();
|
||||
}
|
||||
}
|
||||
else if (a->get_family_id () == m_au.get_family_id () &&
|
||||
a->is_app_of (a->get_family_id (), OP_ARRAY_EXT)) {
|
||||
throw non_pure();
|
||||
}
|
||||
}
|
||||
void operator()(var*) {}
|
||||
void operator()(quantifier*) {}
|
||||
};
|
||||
|
||||
bool unsat_core_learner::only_contains_symbols_b(expr* expr) const
|
||||
{
|
||||
is_pure_expr_proc proc(m_symbols_b, m);
|
||||
try {
|
||||
for_each_expr(proc, expr);
|
||||
}
|
||||
catch (is_pure_expr_proc::non_pure)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
107
src/muz/spacer/spacer_unsat_core_learner.h
Normal file
@ -0,0 +1,107 @@
/*++
Copyright (c) 2017 Arie Gurfinkel

Module Name:

    spacer_unsat_core_learner.h

Abstract:
    itp cores

Author:
    Bernhard Gleiss

Revision History:


--*/
#ifndef _SPACER_UNSAT_CORE_LEARNER_H_
#define _SPACER_UNSAT_CORE_LEARNER_H_

#include "ast.h"
#include "spacer_util.h"
#include "spacer_proof_utils.h"

namespace spacer {


class unsat_core_plugin;
class unsat_core_learner
{
    typedef obj_hashtable<expr> expr_set;

public:
    unsat_core_learner(ast_manager& m) : m(m), m_unsat_core(m) {};
    virtual ~unsat_core_learner();

    ast_manager& m;

    /*
     * register a plugin for computation of partial unsat cores;
     * currently plugins are called in the order they have been registered
     */
    void register_plugin(unsat_core_plugin* plugin);

    /*
     * compute unsat core using the registered unsat-core-plugins
     */
    void compute_unsat_core(proof* root, expr_set& asserted_b, expr_ref_vector& unsat_core);

    /*
     * getter/setter methods for data structures exposed to plugins;
     * the following invariants can be assumed and need to be maintained by the plugins:
     * - a node is a-marked iff it is derived using at least one asserted proof step from A.
     * - a node is b-marked iff its derivation contains no asserted proof steps from A and
     *   no hypotheses (with the additional complication that lemmas conceptually remove hypotheses)
     * - a node is h-marked iff it is derived using at least one hypothesis
     * - a node is closed iff it has already been interpolated, i.e. its contribution is
     *   already covered by the unsat-core.
     */
    bool is_a_marked(proof* p);
    bool is_b_marked(proof* p);
    bool is_h_marked(proof* p);
    bool is_closed(proof* p);
    void set_closed(proof* p, bool value);

    /*
     * adds a lemma to the unsat core
     */
    void add_lemma_to_core(expr* lemma);

    /*
     * helper method which can be used by plugins;
     * returns true iff all symbols of expr occur in some b-asserted formula.
     * must only be called after a call to collect_symbols_b.
     */
    bool only_contains_symbols_b(expr* expr) const;
    bool is_b_pure (proof *p)
    {return !is_h_marked (p) && only_contains_symbols_b (m.get_fact (p));}
    bool is_b_open (proof *p)
    { return is_b_marked (p) && !is_closed (p); }

private:
    ptr_vector<unsat_core_plugin> m_plugins;
    func_decl_set m_symbols_b; // symbols which occur in any b-asserted formula
    void collect_symbols_b(expr_set axioms_b);

    ast_mark m_a_mark;
    ast_mark m_b_mark;
    ast_mark m_h_mark;
    ast_mark m_closed;

    expr_ref_vector m_unsat_core; // collects the lemmas of the unsat-core; at the end they are inserted into unsat_core.

    /*
     * computes partial core for step by delegating computation to plugins
     */
    void compute_partial_core(proof* step);

    /*
     * finalize computation of unsat-core
     */
    void finalize();
};

}

#endif
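Note (not part of the commit): a hypothetical sketch of how the learner above is driven. The concrete plugin class and its constructor arguments are assumptions based on spacer_unsat_core_plugin.cpp below; only the learner interface shown above is taken as given.

#include "spacer_unsat_core_learner.h"
#include "spacer_unsat_core_plugin.h"

// Hedged sketch: compute an unsat core of B-lemmas from a refutation whose
// leaves are partitioned into A-asserted and B-asserted formulas.
void unsat_core_example(ast_manager &m, proof *refutation,
                        obj_hashtable<expr> &asserted_b,
                        expr_ref_vector &core) {
    spacer::unsat_core_learner learner(m);
    // plugins are consulted in registration order on every open A/B step;
    // the learner deletes registered plugins in its destructor
    learner.register_plugin(alloc(spacer::unsat_core_plugin_lemma, learner));
    learner.compute_unsat_core(refutation, asserted_b, core);
}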
776
src/muz/spacer/spacer_unsat_core_plugin.cpp
Normal file
@ -0,0 +1,776 @@
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_unsat_core_plugin.cpp
|
||||
|
||||
Abstract:
|
||||
plugin for itp cores
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
||||
|
||||
Revision History:
|
||||
|
||||
|
||||
--*/
|
||||
#include "spacer_unsat_core_plugin.h"
|
||||
|
||||
#include "spacer_unsat_core_learner.h"
|
||||
|
||||
#include "smt_farkas_util.h"
|
||||
#include "bool_rewriter.h"
|
||||
#include "arith_decl_plugin.h"
|
||||
#include <set>
|
||||
#include "smt_solver.h"
|
||||
#include "solver.h"
|
||||
#include <limits>
|
||||
#include "spacer_proof_utils.h"
|
||||
#include "spacer_matrix.h"
|
||||
|
||||
namespace spacer
|
||||
{
|
||||
|
||||
#pragma mark - unsat_core_plugin_lemma
|
||||
|
||||
void unsat_core_plugin_lemma::compute_partial_core(proof* step)
|
||||
{
|
||||
SASSERT(m_learner.is_a_marked(step));
|
||||
SASSERT(m_learner.is_b_marked(step));
|
||||
|
||||
for (unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(step->get_arg(i)));
|
||||
proof* premise = to_app(step->get_arg(i));
|
||||
|
||||
if (m_learner.is_b_open (premise))
|
||||
{
|
||||
// by IH, premises that are AB marked are already closed
|
||||
SASSERT(!m_learner.is_a_marked(premise));
|
||||
add_lowest_split_to_core(premise);
|
||||
}
|
||||
}
|
||||
m_learner.set_closed(step, true);
|
||||
}
|
||||
|
||||
void unsat_core_plugin_lemma::add_lowest_split_to_core(proof* step) const
|
||||
{
|
||||
SASSERT(m_learner.is_b_open(step));
|
||||
ast_manager &m = m_learner.m;
|
||||
|
||||
ptr_vector<proof> todo;
|
||||
todo.push_back(step);
|
||||
|
||||
while (!todo.empty())
|
||||
{
|
||||
proof* pf = todo.back();
|
||||
todo.pop_back();
|
||||
|
||||
// if current step hasn't been processed,
|
||||
if (!m_learner.is_closed(pf))
|
||||
{
|
||||
m_learner.set_closed(pf, true);
|
||||
// the step is b-marked and not closed.
|
||||
// by I.H. the step must be already visited
|
||||
// so if it is also a-marked, it must be closed
|
||||
SASSERT(m_learner.is_b_marked(pf));
|
||||
SASSERT(!m_learner.is_a_marked(pf));
|
||||
|
||||
// the current step needs to be interpolated:
|
||||
expr* fact = m_learner.m.get_fact(pf);
|
||||
// if we trust the current step and we are able to use it
|
||||
if (m_learner.is_b_pure (pf) &&
|
||||
(m.is_asserted(pf) || is_literal(m, fact)))
|
||||
{
|
||||
// just add it to the core
|
||||
m_learner.add_lemma_to_core(fact);
|
||||
}
|
||||
// otherwise recurse on premises
|
||||
else
|
||||
{
|
||||
for (unsigned i = 0, sz = m_learner.m.get_num_parents(pf);
|
||||
i < sz; ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(pf->get_arg(i)));
|
||||
proof* premise = m.get_parent (pf, i);
|
||||
if (m_learner.is_b_open(premise)) {
|
||||
todo.push_back(premise);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#pragma mark - unsat_core_plugin_farkas_lemma
|
||||
void unsat_core_plugin_farkas_lemma::compute_partial_core(proof* step)
|
||||
{
|
||||
ast_manager &m = m_learner.m;
|
||||
SASSERT(m_learner.is_a_marked(step));
|
||||
SASSERT(m_learner.is_b_marked(step));
|
||||
// XXX this assertion should be true so there is no need to check for it
|
||||
SASSERT (!m_learner.is_closed (step));
|
||||
func_decl* d = step->get_decl();
|
||||
symbol sym;
|
||||
if(!m_learner.is_closed(step) && // if step is not already interpolated
|
||||
step->get_decl_kind() == PR_TH_LEMMA && // and step is a Farkas lemma
|
||||
d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step
|
||||
d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas",
|
||||
d->get_parameter(1).is_symbol(sym) && sym == "farkas" &&
|
||||
d->get_num_parameters() >= m_learner.m.get_num_parents(step) + 2) // the following parameters are the Farkas coefficients
|
||||
{
|
||||
SASSERT(m_learner.m.has_fact(step));
|
||||
|
||||
ptr_vector<app> literals;
|
||||
vector<rational> coefficients;
|
||||
|
||||
/* The farkas lemma represents a subproof starting from premise(-set)s A, BNP and BP(ure) and
|
||||
* ending in a disjunction D. We need to compute the contribution of BP, i.e. a formula, which
|
||||
* is entailed by BP and together with A and BNP entails D.
|
||||
*
|
||||
* Let Fark(F) be the farkas coefficient for F. We can use the fact that
|
||||
* (A*Fark(A) + BNP*Fark(BNP) + BP*Fark(BP) + (neg D)*Fark(D)) => false. (E1)
|
||||
* We further have that A+B => C implies (A \land B) => C. (E2)
|
||||
*
|
||||
* Alternative 1:
|
||||
* From (E1) immediately get that BP*Fark(BP) is a solution.
|
||||
*
|
||||
* Alternative 2:
|
||||
* We can rewrite (E2) to rewrite (E1) to
|
||||
* (BP*Fark(BP)) => (neg(A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D))) (E3)
|
||||
* and since we can derive (A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D)) from
|
||||
* A, BNP and D, we also know that it is inconsisent. Therefore
|
||||
* neg(A*Fark(A) + BNP*Fark(BNP) + (neg D)*Fark(D)) is a solution.
|
||||
*
|
||||
* Finally we also need the following workaround:
|
||||
* 1) Although we know from theory, that the Farkas coefficients are always nonnegative,
|
||||
* the Farkas coefficients provided by arith_core are sometimes negative (must be a bug)
|
||||
* as workaround we take the absolute value of the provided coefficients.
|
||||
*/
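     // Illustration (hypothetical numbers, added for exposition): with two
     // B-pure premises x <= 0 and y <= 0 and Farkas coefficients 1 and 1,
     // the B-contribution is the linear combination
     //   1*(x <= 0) + 1*(y <= 0), i.e. x + y <= 0,
     // which together with the A-premises and the negated conclusion is
     // inconsistent; compute_linear_combination below builds exactly such a
     // sum via smt::farkas_util.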
|
||||
parameter const* params = d->get_parameters() + 2; // point to the first Farkas coefficient
|
||||
|
||||
STRACE("spacer.farkas",
|
||||
verbose_stream() << "Farkas input: "<< "\n";
|
||||
for (unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(step->get_arg(i)));
|
||||
proof *prem = m.get_parent (step, i);
|
||||
|
||||
rational coef;
|
||||
VERIFY(params[i].is_rational(coef));
|
||||
|
||||
bool b_pure = m_learner.is_b_pure (prem);
|
||||
verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m_learner.m.get_fact(prem), m_learner.m) << "\n";
|
||||
}
|
||||
);
|
||||
|
||||
bool can_be_closed = true;
|
||||
|
||||
for(unsigned i = 0; i < m.get_num_parents(step); ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(step->get_arg(i)));
|
||||
proof * premise = m.get_parent (step, i);
|
||||
|
||||
if (m_learner.is_b_open (premise))
|
||||
{
|
||||
SASSERT(!m_learner.is_a_marked(premise));
|
||||
|
||||
if (m_learner.is_b_pure (step))
|
||||
{
|
||||
if (!m_use_constant_from_a)
|
||||
{
|
||||
rational coefficient;
|
||||
VERIFY(params[i].is_rational(coefficient));
|
||||
literals.push_back(to_app(m_learner.m.get_fact(premise)));
|
||||
coefficients.push_back(abs(coefficient));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
can_be_closed = false;
|
||||
|
||||
if (m_use_constant_from_a)
|
||||
{
|
||||
rational coefficient;
|
||||
VERIFY(params[i].is_rational(coefficient));
|
||||
literals.push_back(to_app(m_learner.m.get_fact(premise)));
|
||||
coefficients.push_back(abs(coefficient));
|
||||
}
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (m_use_constant_from_a)
|
||||
{
|
||||
rational coefficient;
|
||||
VERIFY(params[i].is_rational(coefficient));
|
||||
literals.push_back(to_app(m_learner.m.get_fact(premise)));
|
||||
coefficients.push_back(abs(coefficient));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (m_use_constant_from_a)
|
||||
{
|
||||
params += m_learner.m.get_num_parents(step); // point to the first Farkas coefficient, which corresponds to a formula in the conclusion
|
||||
|
||||
// the conclusion can either be a single formula or a disjunction of several formulas, we have to deal with both situations
|
||||
if (m_learner.m.get_num_parents(step) + 2 < d->get_num_parameters())
|
||||
{
|
||||
unsigned num_args = 1;
|
||||
expr* conclusion = m_learner.m.get_fact(step);
|
||||
expr* const* args = &conclusion;
|
||||
if (m_learner.m.is_or(conclusion))
|
||||
{
|
||||
app* _or = to_app(conclusion);
|
||||
num_args = _or->get_num_args();
|
||||
args = _or->get_args();
|
||||
}
|
||||
SASSERT(m_learner.m.get_num_parents(step) + 2 + num_args == d->get_num_parameters());
|
||||
|
||||
bool_rewriter brw(m_learner.m);
|
||||
for (unsigned i = 0; i < num_args; ++i)
|
||||
{
|
||||
expr* premise = args[i];
|
||||
|
||||
expr_ref negatedPremise(m_learner.m);
|
||||
brw.mk_not(premise, negatedPremise);
|
||||
literals.push_back(to_app(negatedPremise));
|
||||
|
||||
rational coefficient;
|
||||
VERIFY(params[i].is_rational(coefficient));
|
||||
coefficients.push_back(abs(coefficient));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// only if all b-premises can be used directly, add the farkas core and close the step
|
||||
if (can_be_closed)
|
||||
{
|
||||
m_learner.set_closed(step, true);
|
||||
|
||||
expr_ref res(m_learner.m);
|
||||
compute_linear_combination(coefficients, literals, res);
|
||||
|
||||
m_learner.add_lemma_to_core(res);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void unsat_core_plugin_farkas_lemma::compute_linear_combination(const vector<rational>& coefficients, const ptr_vector<app>& literals, expr_ref& res)
|
||||
{
|
||||
SASSERT(literals.size() == coefficients.size());
|
||||
|
||||
ast_manager& m = res.get_manager();
|
||||
smt::farkas_util util(m);
|
||||
if (m_use_constant_from_a)
|
||||
{
|
||||
util.set_split_literals (m_split_literals); // small optimization: if flag m_split_literals is set, then preserve diff constraints
|
||||
}
|
||||
for(unsigned i = 0; i < literals.size(); ++i)
|
||||
{
|
||||
util.add(coefficients[i], literals[i]);
|
||||
}
|
||||
if (m_use_constant_from_a)
|
||||
{
|
||||
res = util.get();
|
||||
}
|
||||
else
|
||||
{
|
||||
expr_ref negated_linear_combination = util.get();
|
||||
res = mk_not(m, negated_linear_combination);
|
||||
}
|
||||
}
|
||||
|
||||
#pragma mark - unsat_core_plugin_farkas_optimized
|
||||
void unsat_core_plugin_farkas_lemma_optimized::compute_partial_core(proof* step)
|
||||
{
|
||||
SASSERT(m_learner.is_a_marked(step));
|
||||
SASSERT(m_learner.is_b_marked(step));
|
||||
|
||||
func_decl* d = step->get_decl();
|
||||
symbol sym;
|
||||
if(!m_learner.is_closed(step) && // if step is not already interpolated
|
||||
step->get_decl_kind() == PR_TH_LEMMA && // and step is a Farkas lemma
|
||||
d->get_num_parameters() >= 2 && // the Farkas coefficients are saved in the parameters of step
|
||||
d->get_parameter(0).is_symbol(sym) && sym == "arith" && // the first two parameters are "arith", "farkas",
|
||||
d->get_parameter(1).is_symbol(sym) && sym == "farkas" &&
|
||||
d->get_num_parameters() >= m_learner.m.get_num_parents(step) + 2) // the following parameters are the Farkas coefficients
|
||||
{
|
||||
SASSERT(m_learner.m.has_fact(step));
|
||||
|
||||
vector<std::pair<app*,rational> > linear_combination; // collects all summands of the linear combination
|
||||
|
||||
parameter const* params = d->get_parameters() + 2; // point to the first Farkas coefficient
|
||||
|
||||
STRACE("spacer.farkas",
|
||||
verbose_stream() << "Farkas input: "<< "\n";
|
||||
for (unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(step->get_arg(i)));
|
||||
proof *prem = m.get_parent (step, i);
|
||||
|
||||
rational coef;
|
||||
VERIFY(params[i].is_rational(coef));
|
||||
|
||||
bool b_pure = m_learner.is_b_pure (prem);
|
||||
verbose_stream() << (b_pure?"B":"A") << " " << coef << " " << mk_pp(m_learner.m.get_fact(prem), m_learner.m) << "\n";
|
||||
}
|
||||
);
|
||||
|
||||
bool can_be_closed = true;
|
||||
for(unsigned i = 0; i < m_learner.m.get_num_parents(step); ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(step->get_arg(i)));
|
||||
proof * premise = m.get_parent (step, i);
|
||||
|
||||
if (m_learner.is_b_marked(premise) && !m_learner.is_closed(premise))
|
||||
{
|
||||
SASSERT(!m_learner.is_a_marked(premise));
|
||||
|
||||
// XXX AG: why is this condition is based on step and not on premise?
|
||||
if (m_learner.only_contains_symbols_b(m_learner.m.get_fact(step)) && !m_learner.is_h_marked(step))
|
||||
{
|
||||
rational coefficient;
|
||||
VERIFY(params[i].is_rational(coefficient));
|
||||
linear_combination.push_back(std::make_pair(to_app(m_learner.m.get_fact(premise)), abs(coefficient)));
|
||||
}
|
||||
else
|
||||
{
|
||||
can_be_closed = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// only if all b-premises can be used directly, close the step and add linear combinations for later processing
|
||||
if (can_be_closed)
|
||||
{
|
||||
m_learner.set_closed(step, true);
|
||||
if (!linear_combination.empty())
|
||||
{
|
||||
m_linear_combinations.push_back(linear_combination);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct farkas_optimized_less_than_pairs
|
||||
{
|
||||
inline bool operator() (const std::pair<app*,rational>& pair1, const std::pair<app*,rational>& pair2) const
|
||||
{
|
||||
return (pair1.first->get_id() < pair2.first->get_id());
|
||||
}
|
||||
};
|
||||
|
||||
void unsat_core_plugin_farkas_lemma_optimized::finalize()
|
||||
{
|
||||
if(m_linear_combinations.empty())
|
||||
{
|
||||
return;
|
||||
}
|
||||
DEBUG_CODE(
|
||||
for (auto& linear_combination : m_linear_combinations) {
|
||||
SASSERT(linear_combination.size() > 0);
|
||||
});
|
||||
|
||||
// 1. construct ordered basis
|
||||
ptr_vector<app> ordered_basis;
|
||||
obj_map<app, unsigned> map;
|
||||
unsigned counter = 0;
|
||||
for (const auto& linear_combination : m_linear_combinations)
|
||||
{
|
||||
for (const auto& pair : linear_combination)
|
||||
{
|
||||
if (!map.contains(pair.first))
|
||||
{
|
||||
ordered_basis.push_back(pair.first);
|
||||
map.insert(pair.first, counter++);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 2. populate matrix
|
||||
spacer_matrix matrix(m_linear_combinations.size(), ordered_basis.size());
|
||||
|
||||
for (unsigned i=0; i < m_linear_combinations.size(); ++i)
|
||||
{
|
||||
auto linear_combination = m_linear_combinations[i];
|
||||
for (const auto& pair : linear_combination)
|
||||
{
|
||||
matrix.set(i, map[pair.first], pair.second);
|
||||
}
|
||||
}
|
||||
|
||||
// 3. perform gaussian elimination
|
||||
unsigned i = matrix.perform_gaussian_elimination();
|
||||
|
||||
// 4. extract linear combinations from matrix and add result to core
|
||||
for (unsigned k=0; k < i; k++)// i points to the row after the last row which is non-zero
|
||||
{
|
||||
ptr_vector<app> literals;
|
||||
vector<rational> coefficients;
|
||||
for (unsigned l=0; l < matrix.num_cols(); ++l)
|
||||
{
|
||||
if (!matrix.get(k,l).is_zero())
|
||||
{
|
||||
literals.push_back(ordered_basis[l]);
|
||||
coefficients.push_back(matrix.get(k,l));
|
||||
}
|
||||
}
|
||||
SASSERT(literals.size() > 0);
|
||||
expr_ref linear_combination(m);
|
||||
compute_linear_combination(coefficients, literals, linear_combination);
|
||||
|
||||
m_learner.add_lemma_to_core(linear_combination);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void unsat_core_plugin_farkas_lemma_optimized::compute_linear_combination(const vector<rational>& coefficients, const ptr_vector<app>& literals, expr_ref& res)
|
||||
{
|
||||
SASSERT(literals.size() == coefficients.size());
|
||||
|
||||
ast_manager& m = res.get_manager();
|
||||
smt::farkas_util util(m);
|
||||
for(unsigned i = 0; i < literals.size(); ++i)
|
||||
{
|
||||
util.add(coefficients[i], literals[i]);
|
||||
}
|
||||
expr_ref negated_linear_combination = util.get();
|
||||
SASSERT(m.is_not(negated_linear_combination));
|
||||
res = mk_not(m, negated_linear_combination); //TODO: rewrite the get-method to return nonnegated stuff?
|
||||
}
|
||||
|
||||
#pragma mark - unsat_core_plugin_farkas_bounded
|
||||
|
||||
void unsat_core_plugin_farkas_lemma_bounded::finalize()
|
||||
{
|
||||
if(m_linear_combinations.empty())
|
||||
{return;}
|
||||
DEBUG_CODE(
|
||||
for (auto& linear_combination : m_linear_combinations) {
|
||||
SASSERT(linear_combination.size() > 0);
|
||||
});
|
||||
|
||||
// 1. construct ordered basis
|
||||
ptr_vector<app> ordered_basis;
|
||||
obj_map<app, unsigned> map;
|
||||
unsigned counter = 0;
|
||||
for (const auto& linear_combination : m_linear_combinations)
|
||||
{
|
||||
for (const auto& pair : linear_combination)
|
||||
{
|
||||
if (!map.contains(pair.first))
|
||||
{
|
||||
ordered_basis.push_back(pair.first);
|
||||
map.insert(pair.first, counter++);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 2. populate matrix
|
||||
spacer_matrix matrix(m_linear_combinations.size(), ordered_basis.size());
|
||||
|
||||
for (unsigned i=0; i < m_linear_combinations.size(); ++i)
|
||||
{
|
||||
auto linear_combination = m_linear_combinations[i];
|
||||
for (const auto& pair : linear_combination)
|
||||
{
|
||||
matrix.set(i, map[pair.first], pair.second);
|
||||
}
|
||||
}
|
||||
matrix.print_matrix();
|
||||
|
||||
// 3. normalize matrix to integer values
|
||||
matrix.normalize();
|
||||
|
||||
|
||||
arith_util util(m);
|
||||
|
||||
vector<expr_ref_vector> coeffs;
|
||||
for (unsigned i=0; i < matrix.num_rows(); ++i)
|
||||
{
|
||||
coeffs.push_back(expr_ref_vector(m));
|
||||
}
|
||||
|
||||
vector<expr_ref_vector> bounded_vectors;
|
||||
for (unsigned j=0; j < matrix.num_cols(); ++j)
|
||||
{
|
||||
bounded_vectors.push_back(expr_ref_vector(m));
|
||||
}
|
||||
|
||||
// 4. find smallest n using guess and check algorithm
|
||||
for(unsigned n = 1; true; ++n)
|
||||
{
|
||||
params_ref p;
|
||||
p.set_bool("model", true);
|
||||
scoped_ptr<solver> s = mk_smt_solver(m, p, symbol::null); // TODO: incremental version?
|
||||
|
||||
// add new variables w_in,
|
||||
for (unsigned i=0; i < matrix.num_rows(); ++i)
|
||||
{
|
||||
std::string name = "w_" + std::to_string(i) + std::to_string(n);
|
||||
|
||||
func_decl_ref decl(m);
|
||||
decl = m.mk_func_decl(symbol(name.c_str()), 0, (sort*const*)0, util.mk_int());
|
||||
coeffs[i].push_back(m.mk_const(decl));
|
||||
}
|
||||
|
||||
            // add new variables s_{j,n}
|
||||
for (unsigned j=0; j < matrix.num_cols(); ++j)
|
||||
{
|
||||
std::string name = "s_" + std::to_string(j) + std::to_string(n);
|
||||
|
||||
func_decl_ref decl(m);
|
||||
decl = m.mk_func_decl(symbol(name.c_str()), 0, (sort*const*)0, util.mk_int());
|
||||
|
||||
expr_ref s_jn(m);
|
||||
s_jn = m.mk_const(decl);
|
||||
|
||||
bounded_vectors[j].push_back(s_jn);
|
||||
}
|
||||
|
||||
// assert bounds for all s_jn
|
||||
for (unsigned l=0; l < n; ++l)
|
||||
{
|
||||
for (unsigned j=0; j < matrix.num_cols(); ++j)
|
||||
{
|
||||
expr* s_jn = bounded_vectors[j][l].get();
|
||||
|
||||
expr_ref lb(util.mk_le(util.mk_int(0), s_jn), m);
|
||||
expr_ref ub(util.mk_le(s_jn, util.mk_int(1)), m);
|
||||
s->assert_expr(lb);
|
||||
s->assert_expr(ub);
|
||||
}
|
||||
}
|
||||
|
||||
// assert: forall i,j: a_ij = sum_k w_ik * s_jk
|
||||
for (unsigned i=0; i < matrix.num_rows(); ++i)
|
||||
{
|
||||
for (unsigned j=0; j < matrix.num_cols(); ++j)
|
||||
{
|
||||
SASSERT(matrix.get(i, j).is_int());
|
||||
app_ref a_ij(util.mk_numeral(matrix.get(i,j), true),m);
|
||||
|
||||
app_ref sum(m);
|
||||
sum = util.mk_int(0);
|
||||
for (int k=0; k < n; ++k)
|
||||
{
|
||||
sum = util.mk_add(sum, util.mk_mul(coeffs[i][k].get(), bounded_vectors[j][k].get()));
|
||||
}
|
||||
expr_ref eq(m.mk_eq(a_ij, sum),m);
|
||||
s->assert_expr(eq);
|
||||
}
|
||||
}
|
||||
|
||||
// check result
|
||||
lbool res = s->check_sat(0,0);
|
||||
|
||||
// if sat extract model and add corresponding linear combinations to core
|
||||
if (res == lbool::l_true)
|
||||
{
|
||||
model_ref model;
|
||||
s->get_model(model);
|
||||
|
||||
for (int k=0; k < n; ++k)
|
||||
{
|
||||
ptr_vector<app> literals;
|
||||
vector<rational> coefficients;
|
||||
for (int j=0; j < matrix.num_cols(); ++j)
|
||||
{
|
||||
expr_ref evaluation(m);
|
||||
|
||||
model.get()->eval(bounded_vectors[j][k].get(), evaluation, false);
|
||||
if (!util.is_zero(evaluation))
|
||||
{
|
||||
literals.push_back(ordered_basis[j]);
|
||||
coefficients.push_back(rational(1));
|
||||
}
|
||||
}
|
||||
                    SASSERT(!literals.empty()); // otherwise the previous iteration of the outer loop would already have found a solution
|
||||
expr_ref linear_combination(m);
|
||||
compute_linear_combination(coefficients, literals, linear_combination);
|
||||
|
||||
m_learner.add_lemma_to_core(linear_combination);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
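
Editor's note: step 4 above guesses, for increasing n, whether the normalized integer coefficient matrix A can be written as A = W * S, where W is a (rows x n) integer matrix and S an (n x cols) matrix whose entries are constrained to {0,1}; a satisfying assignment of S then selects a small subset of literals per row. A minimal stand-alone sketch of that guess-and-check encoding with the public z3++ API (assumed available; the matrix, the names and n are illustrative):

#include <z3++.h>
#include <iostream>
#include <string>
#include <vector>
using namespace z3;

int main() {
    context c;
    solver s(c);
    const unsigned rows = 2, cols = 2, n = 1;
    int A[rows][cols] = { {2, 2}, {3, 3} };   // both rows are multiples of (1, 1), so n = 1 should suffice

    // W: rows x n integer unknowns, S: n x cols unknowns restricted to {0,1}
    std::vector<std::vector<expr>> W(rows), S(n);
    for (unsigned i = 0; i < rows; ++i)
        for (unsigned k = 0; k < n; ++k)
            W[i].push_back(c.int_const(("w_" + std::to_string(i) + "_" + std::to_string(k)).c_str()));
    for (unsigned k = 0; k < n; ++k)
        for (unsigned j = 0; j < cols; ++j) {
            expr s_kj = c.int_const(("s_" + std::to_string(k) + "_" + std::to_string(j)).c_str());
            s.add(s_kj >= 0 && s_kj <= 1);
            S[k].push_back(s_kj);
        }

    // forall i,j: A[i][j] = sum_k W[i][k] * S[k][j]
    for (unsigned i = 0; i < rows; ++i)
        for (unsigned j = 0; j < cols; ++j) {
            expr sum = c.int_val(0);
            for (unsigned k = 0; k < n; ++k) sum = sum + W[i][k] * S[k][j];
            s.add(sum == A[i][j]);
        }

    std::cout << s.check() << "\n";           // sat: the factorization with a single 0/1 row exists
}
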
|
||||
|
||||
#pragma mark - unsat_core_plugin_min_cut
|
||||
unsat_core_plugin_min_cut::unsat_core_plugin_min_cut(unsat_core_learner& learner, ast_manager& m) : unsat_core_plugin(learner), m(m){}
|
||||
|
||||
void unsat_core_plugin_min_cut::compute_partial_core(proof* step)
|
||||
{
|
||||
ptr_vector<proof> todo;
|
||||
|
||||
SASSERT(m_learner.is_a_marked(step));
|
||||
SASSERT(m_learner.is_b_marked(step));
|
||||
SASSERT(m.get_num_parents(step) > 0);
|
||||
SASSERT(!m_learner.is_closed(step));
|
||||
todo.push_back(step);
|
||||
|
||||
while (!todo.empty())
|
||||
{
|
||||
proof* current = todo.back();
|
||||
todo.pop_back();
|
||||
|
||||
if (!m_learner.is_closed(current) && !m_visited.is_marked(current))
|
||||
{
|
||||
m_visited.mark(current, true);
|
||||
advance_to_lowest_partial_cut(current, todo);
|
||||
}
|
||||
}
|
||||
m_learner.set_closed(step, true);
|
||||
}
|
||||
|
||||
void unsat_core_plugin_min_cut::advance_to_lowest_partial_cut(proof* step, ptr_vector<proof>& todo2)
|
||||
{
|
||||
bool is_sink = true;
|
||||
|
||||
ast_manager &m = m_learner.m;
|
||||
ptr_vector<proof> todo;
|
||||
|
||||
for (unsigned i = 0, sz = m.get_num_parents(step); i < sz; ++i)
|
||||
{
|
||||
proof* premise = m.get_parent (step, i);
|
||||
{
|
||||
if (m_learner.is_b_marked(premise))
|
||||
{
|
||||
todo.push_back(premise);
|
||||
}
|
||||
}
|
||||
}
|
||||
while (!todo.empty())
|
||||
{
|
||||
proof* current = todo.back();
|
||||
todo.pop_back();
|
||||
|
||||
// if current step hasn't been processed,
|
||||
if (!m_learner.is_closed(current))
|
||||
{
|
||||
SASSERT(!m_learner.is_a_marked(current)); // by I.H. the step must be already visited
|
||||
|
||||
// and the current step needs to be interpolated:
|
||||
if (m_learner.is_b_marked(current))
|
||||
{
|
||||
// if we trust the current step and we are able to use it
|
||||
if (m_learner.is_b_pure (current) &&
|
||||
(m.is_asserted(current) ||
|
||||
is_literal(m, m.get_fact(current))))
|
||||
{
|
||||
                    // add corresponding edges and continue the original traversal
|
||||
if (m_learner.is_a_marked(step))
|
||||
{
|
||||
add_edge(nullptr, current); // current is sink
|
||||
}
|
||||
else
|
||||
{
|
||||
add_edge(step, current);
|
||||
}
|
||||
todo2.push_back(current);
|
||||
is_sink = false;
|
||||
}
|
||||
// otherwise recurse on premises
|
||||
else
|
||||
{
|
||||
for (unsigned i = 0; i < m_learner.m.get_num_parents(current); ++i)
|
||||
{
|
||||
SASSERT(m_learner.m.is_proof(current->get_arg(i)));
|
||||
proof* premise = m.get_parent (current, i);
|
||||
todo.push_back(premise);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (is_sink)
|
||||
{
|
||||
add_edge(step, nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
void unsat_core_plugin_min_cut::add_edge(proof* i, proof* j)
|
||||
{
|
||||
unsigned node_i;
|
||||
unsigned node_j;
|
||||
if (i == nullptr)
|
||||
{
|
||||
node_i = 0;
|
||||
}
|
||||
else
|
||||
{
|
||||
unsigned tmp;
|
||||
if (m_proof_to_node_plus.find(i, tmp))
|
||||
{
|
||||
node_i = tmp;
|
||||
}
|
||||
else
|
||||
{
|
||||
unsigned node_other = m_min_cut.new_node();
|
||||
node_i = m_min_cut.new_node();
|
||||
|
||||
m_proof_to_node_minus.insert(i, node_other);
|
||||
m_proof_to_node_plus.insert(i, node_i);
|
||||
|
||||
if (node_i >= m_node_to_formula.size())
|
||||
{
|
||||
m_node_to_formula.resize(node_i + 1);
|
||||
}
|
||||
m_node_to_formula[node_other] = m.get_fact(i);
|
||||
m_node_to_formula[node_i] = m.get_fact(i);
|
||||
|
||||
m_min_cut.add_edge(node_other, node_i, 1);
|
||||
}
|
||||
}
|
||||
|
||||
if (j == nullptr)
|
||||
{
|
||||
node_j = 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
unsigned tmp;
|
||||
if (m_proof_to_node_minus.find(j, tmp))
|
||||
{
|
||||
node_j = tmp;
|
||||
}
|
||||
else
|
||||
{
|
||||
node_j = m_min_cut.new_node();
|
||||
unsigned node_other = m_min_cut.new_node();
|
||||
|
||||
m_proof_to_node_minus.insert(j, node_j);
|
||||
m_proof_to_node_plus.insert(j, node_other);
|
||||
|
||||
if (node_other >= m_node_to_formula.size())
|
||||
{
|
||||
m_node_to_formula.resize(node_other + 1);
|
||||
}
|
||||
m_node_to_formula[node_j] = m.get_fact(j);
|
||||
m_node_to_formula[node_other] = m.get_fact(j);
|
||||
|
||||
m_min_cut.add_edge(node_j, node_other, 1);
|
||||
}
|
||||
}
|
||||
|
||||
// finally connect nodes
|
||||
m_min_cut.add_edge(node_i, node_j, 1);
|
||||
}
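
Editor's note: add_edge reduces the vertex-cut problem on proof steps to an edge-cut problem. Every step is split into an in-node ("minus", closer to the source) and an out-node ("plus", closer to the sink) joined by a capacity-1 edge, so that cutting this internal edge corresponds to selecting the step for the core; node 0 is the dedicated source and node 1 the dedicated sink. The following is a schematic, self-contained sketch of that reduction (illustrative only, not the spacer_min_cut interface; proof steps are represented by plain integer ids):

#include <map>
#include <tuple>
#include <utility>
#include <vector>

// Node-splitting reduction: a minimum edge cut of the split graph equals a
// minimum vertex cut (set of proof steps) of the original graph.
struct split_graph {
    unsigned next_node = 2;                                       // 0 = source, 1 = sink
    std::map<int, std::pair<unsigned, unsigned>> split;           // step id -> (in-node, out-node)
    std::vector<std::tuple<unsigned, unsigned, unsigned>> edges;  // (from, to, capacity)

    std::pair<unsigned, unsigned> nodes_of(int step) {
        auto it = split.find(step);
        if (it != split.end()) return it->second;
        unsigned in = next_node++, out = next_node++;
        edges.emplace_back(in, out, 1);                           // cutting this edge = picking the step
        return split[step] = { in, out };
    }

    // step < 0 stands for "no step": the source on the from-side, the sink on the to-side
    void add_edge(int from_step, int to_step) {
        unsigned from = from_step < 0 ? 0 : nodes_of(from_step).second;  // leave through the out-node
        unsigned to   = to_step   < 0 ? 1 : nodes_of(to_step).first;     // enter through the in-node
        edges.emplace_back(from, to, 1);
    }
};

int main() {
    split_graph g;
    g.add_edge(-1, 7);   // source -> step 7
    g.add_edge(7, 3);    // step 7 -> step 3
    g.add_edge(3, -1);   // step 3 -> sink
    // g.edges now also contains the two internal capacity-1 edges for steps 7 and 3;
    // any standard max-flow / min-cut over g.edges yields a minimum set of steps to cut.
    return 0;
}
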
|
||||
|
||||
void unsat_core_plugin_min_cut::finalize()
|
||||
{
|
||||
vector<unsigned int> cut_nodes;
|
||||
m_min_cut.compute_min_cut(cut_nodes);
|
||||
|
||||
for (unsigned cut_node : cut_nodes)
|
||||
{
|
||||
m_learner.add_lemma_to_core(m_node_to_formula[cut_node]);
|
||||
}
|
||||
}
|
||||
}
|
115  src/muz/spacer/spacer_unsat_core_plugin.h  Normal file
|
@ -0,0 +1,115 @@
|
|||
/*++
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_unsat_core_plugin.h
|
||||
|
||||
Abstract:
|
||||
plugin for itp cores
|
||||
|
||||
Author:
|
||||
Bernhard Gleiss
|
||||
|
||||
Revision History:
|
||||
|
||||
|
||||
--*/
|
||||
#ifndef _SPACER_UNSAT_CORE_PLUGIN_H_
|
||||
#define _SPACER_UNSAT_CORE_PLUGIN_H_
|
||||
|
||||
#include "ast.h"
|
||||
#include "spacer_min_cut.h"
|
||||
|
||||
namespace spacer {
|
||||
|
||||
class unsat_core_learner;
|
||||
|
||||
|
||||
class unsat_core_plugin {
|
||||
|
||||
public:
|
||||
unsat_core_plugin(unsat_core_learner& learner) : m_learner(learner){};
|
||||
virtual ~unsat_core_plugin(){};
|
||||
virtual void compute_partial_core(proof* step) = 0;
|
||||
virtual void finalize(){};
|
||||
|
||||
unsat_core_learner& m_learner;
|
||||
};
|
||||
|
||||
|
||||
class unsat_core_plugin_lemma : public unsat_core_plugin {
|
||||
|
||||
public:
|
||||
unsat_core_plugin_lemma(unsat_core_learner& learner) : unsat_core_plugin(learner){};
|
||||
|
||||
virtual void compute_partial_core(proof* step) override;
|
||||
|
||||
private:
|
||||
void add_lowest_split_to_core(proof* step) const;
|
||||
};
|
||||
|
||||
|
||||
class unsat_core_plugin_farkas_lemma : public unsat_core_plugin {
|
||||
|
||||
public:
|
||||
unsat_core_plugin_farkas_lemma(unsat_core_learner& learner, bool split_literals, bool use_constant_from_a=true) : unsat_core_plugin(learner), m_split_literals(split_literals), m_use_constant_from_a(use_constant_from_a) {};
|
||||
|
||||
virtual void compute_partial_core(proof* step) override;
|
||||
|
||||
private:
|
||||
bool m_split_literals;
|
||||
bool m_use_constant_from_a;
|
||||
/*
|
||||
* compute linear combination of literals 'literals' having coefficients 'coefficients' and save result in res
|
||||
*/
|
||||
void compute_linear_combination(const vector<rational>& coefficients, const ptr_vector<app>& literals, expr_ref& res);
|
||||
};
|
||||
|
||||
class unsat_core_plugin_farkas_lemma_optimized : public unsat_core_plugin {
|
||||
|
||||
public:
|
||||
unsat_core_plugin_farkas_lemma_optimized(unsat_core_learner& learner, ast_manager& m) : unsat_core_plugin(learner), m(m) {};
|
||||
|
||||
virtual void compute_partial_core(proof* step) override;
|
||||
virtual void finalize() override;
|
||||
|
||||
protected:
|
||||
vector<vector<std::pair<app*, rational> > > m_linear_combinations;
|
||||
ast_manager& m;
|
||||
/*
|
||||
* compute linear combination of literals 'literals' having coefficients 'coefficients' and save result in res
|
||||
*/
|
||||
void compute_linear_combination(const vector<rational>& coefficients, const ptr_vector<app>& literals, expr_ref& res);
|
||||
};
|
||||
|
||||
class unsat_core_plugin_farkas_lemma_bounded : public unsat_core_plugin_farkas_lemma_optimized {
|
||||
|
||||
public:
|
||||
unsat_core_plugin_farkas_lemma_bounded(unsat_core_learner& learner, ast_manager& m) : unsat_core_plugin_farkas_lemma_optimized(learner, m) {};
|
||||
|
||||
virtual void finalize() override;
|
||||
};
|
||||
|
||||
class unsat_core_plugin_min_cut : public unsat_core_plugin {
|
||||
|
||||
public:
|
||||
unsat_core_plugin_min_cut(unsat_core_learner& learner, ast_manager& m);
|
||||
|
||||
virtual void compute_partial_core(proof* step) override;
|
||||
virtual void finalize() override;
|
||||
private:
|
||||
ast_manager& m;
|
||||
|
||||
ast_mark m_visited; // saves for each node i whether the subproof with root i has already been added to the min-cut-problem
|
||||
obj_map<proof, unsigned> m_proof_to_node_minus; // maps proof-steps to the corresponding minus-nodes (the ones which are closer to source)
|
||||
obj_map<proof, unsigned> m_proof_to_node_plus; // maps proof-steps to the corresponding plus-nodes (the ones which are closer to sink)
|
||||
void advance_to_lowest_partial_cut(proof* step, ptr_vector<proof>& todo2);
|
||||
void add_edge(proof* i, proof* j);
|
||||
|
||||
vector<expr*> m_node_to_formula; // maps each node to the corresponding formula in the original proof
|
||||
|
||||
spacer_min_cut m_min_cut;
|
||||
};
|
||||
}
|
||||
#endif
|
1393  src/muz/spacer/spacer_util.cpp  Normal file
File diff suppressed because it is too large
180  src/muz/spacer/spacer_util.h  Normal file
|
@ -0,0 +1,180 @@
|
|||
/*++
|
||||
Copyright (c) 2011 Microsoft Corporation
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_util.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Utility functions for SPACER.
|
||||
|
||||
Author:
|
||||
|
||||
Krystof Hoder (t-khoder) 2011-8-19.
|
||||
Arie Gurfinkel
|
||||
Anvesh Komuravelli
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef _SPACER_UTIL_H_
|
||||
#define _SPACER_UTIL_H_
|
||||
|
||||
#include "ast.h"
|
||||
#include "ast_pp.h"
|
||||
#include "obj_hashtable.h"
|
||||
#include "ref_vector.h"
|
||||
#include "simplifier.h"
|
||||
#include "trace.h"
|
||||
#include "vector.h"
|
||||
#include "arith_decl_plugin.h"
|
||||
#include "array_decl_plugin.h"
|
||||
#include "bv_decl_plugin.h"
|
||||
#include "model.h"
|
||||
|
||||
#include "stopwatch.h"
|
||||
#include "spacer_antiunify.h"
|
||||
|
||||
class model;
|
||||
class model_core;
|
||||
class model_evaluator;
|
||||
|
||||
namespace spacer {
|
||||
|
||||
inline unsigned infty_level () {return UINT_MAX;}
|
||||
|
||||
inline bool is_infty_level(unsigned lvl)
|
||||
{ return lvl == infty_level (); }
|
||||
|
||||
inline unsigned next_level(unsigned lvl)
|
||||
{ return is_infty_level(lvl)?lvl:(lvl+1); }
|
||||
|
||||
inline unsigned prev_level (unsigned lvl)
|
||||
{
|
||||
if(is_infty_level(lvl)) { return infty_level(); }
|
||||
if(lvl == 0) { return 0; }
|
||||
return lvl -1;
|
||||
}
|
||||
|
||||
struct pp_level {
|
||||
unsigned m_level;
|
||||
pp_level(unsigned l): m_level(l) {}
|
||||
};
|
||||
|
||||
inline std::ostream& operator<<(std::ostream& out, pp_level const& p)
|
||||
{
|
||||
if (is_infty_level(p.m_level)) {
|
||||
return out << "oo";
|
||||
} else {
|
||||
return out << p.m_level;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
struct scoped_watch {
|
||||
stopwatch &m_sw;
|
||||
scoped_watch (stopwatch &sw, bool reset=false): m_sw(sw)
|
||||
{
|
||||
if(reset) { m_sw.reset(); }
|
||||
m_sw.start ();
|
||||
}
|
||||
~scoped_watch () {m_sw.stop ();}
|
||||
};
|
||||
|
||||
|
||||
typedef ptr_vector<app> app_vector;
|
||||
typedef ptr_vector<func_decl> decl_vector;
|
||||
typedef obj_hashtable<func_decl> func_decl_set;
|
||||
|
||||
|
||||
class model_evaluator_util {
|
||||
ast_manager& m;
|
||||
model_ref m_model;
|
||||
model_evaluator* m_mev;
|
||||
|
||||
/// initialize with a given model. All previous state is lost. model can be NULL
|
||||
void reset (model *model);
|
||||
public:
|
||||
model_evaluator_util(ast_manager& m);
|
||||
~model_evaluator_util();
|
||||
|
||||
void set_model(model &model) {reset (&model);}
|
||||
model_ref &get_model() {return m_model;}
|
||||
ast_manager& get_ast_manager() const {return m;}
|
||||
|
||||
public:
|
||||
bool is_true (const expr_ref_vector &v);
|
||||
bool is_false(expr* x);
|
||||
bool is_true(expr* x);
|
||||
|
||||
bool eval (const expr_ref_vector &v, expr_ref &result, bool model_completion);
|
||||
/// evaluates an expression
|
||||
bool eval (expr *e, expr_ref &result, bool model_completion);
|
||||
// expr_ref eval(expr* e, bool complete=true);
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
\brief replace variables that are used in many disequalities by
|
||||
an equality using the model.
|
||||
|
||||
Assumption: the model satisfies the conjunctions.
|
||||
*/
|
||||
void reduce_disequalities(model& model, unsigned threshold, expr_ref& fml);
|
||||
|
||||
/**
|
||||
\brief hoist non-boolean if expressions.
|
||||
*/
|
||||
void hoist_non_bool_if(expr_ref& fml);
|
||||
|
||||
bool is_difference_logic(ast_manager& m, unsigned num_fmls, expr* const* fmls);
|
||||
|
||||
bool is_utvpi_logic(ast_manager& m, unsigned num_fmls, expr* const* fmls);
|
||||
|
||||
/**
|
||||
* do the following in sequence
|
||||
* 1. use qe_lite to cheaply eliminate vars
|
||||
* 2. for remaining boolean vars, substitute using M
|
||||
* 3. use MBP for remaining array and arith variables
|
||||
* 4. for any remaining arith variables, substitute using M
|
||||
*/
|
||||
void qe_project (ast_manager& m, app_ref_vector& vars, expr_ref& fml,
|
||||
const model_ref& M, bool reduce_all_selects=false, bool native_mbp=false,
|
||||
bool dont_sub=false);
|
||||
|
||||
void qe_project (ast_manager& m, app_ref_vector& vars, expr_ref& fml, model_ref& M, expr_map& map);
|
||||
|
||||
void expand_literals(ast_manager &m, expr_ref_vector& conjs);
|
||||
void compute_implicant_literals (model_evaluator_util &mev,
|
||||
expr_ref_vector &formula, expr_ref_vector &res);
|
||||
void simplify_bounds (expr_ref_vector &lemmas);
|
||||
void normalize(expr *e, expr_ref &out, bool use_simplify_bounds = true, bool factor_eqs = false);
|
||||
|
||||
/** ground expression by replacing all free variables by skolem constants */
|
||||
void ground_expr (expr *e, expr_ref &out, app_ref_vector &vars);
|
||||
|
||||
|
||||
void mbqi_project (model &M, app_ref_vector &vars, expr_ref &fml);
|
||||
|
||||
bool contains_selects (expr* fml, ast_manager& m);
|
||||
void get_select_indices (expr* fml, app_ref_vector& indices, ast_manager& m);
|
||||
|
||||
void find_decls (expr* fml, app_ref_vector& decls, std::string& prefix);
|
||||
|
||||
/** extended pretty-printer
|
||||
* used for debugging
|
||||
* disables aliasing of common sub-expressions
|
||||
*/
|
||||
struct mk_epp : public mk_pp {
|
||||
params_ref m_epp_params;
|
||||
expr_ref m_epp_expr;
|
||||
mk_epp(ast *t, ast_manager &m, unsigned indent = 0, unsigned num_vars = 0, char const * var_prefix = 0);
|
||||
void rw(expr *e, expr_ref &out);
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif
|
519  src/muz/spacer/spacer_virtual_solver.cpp  Normal file
|
@ -0,0 +1,519 @@
|
|||
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_virtual_solver.cpp
|
||||
|
||||
Abstract:
|
||||
|
||||
multi-solver view of a single smt::kernel
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
|
||||
#include "spacer_virtual_solver.h"
|
||||
#include "ast_util.h"
|
||||
#include "ast_pp_util.h"
|
||||
#include "spacer_util.h"
|
||||
#include "bool_rewriter.h"
|
||||
|
||||
#include "proof_checker.h"
|
||||
|
||||
#include "scoped_proof.h"
|
||||
|
||||
namespace spacer {
|
||||
virtual_solver::virtual_solver(virtual_solver_factory &factory,
|
||||
smt::kernel &context, app* pred) :
|
||||
solver_na2as(context.m()),
|
||||
m_factory(factory),
|
||||
m(context.m()),
|
||||
m_context(context),
|
||||
m_pred(pred, m),
|
||||
m_virtual(!m.is_true(pred)),
|
||||
m_assertions(m),
|
||||
m_head(0),
|
||||
m_flat(m),
|
||||
m_pushed(false),
|
||||
m_in_delay_scope(false),
|
||||
m_dump_benchmarks(factory.fparams().m_dump_benchmarks),
|
||||
m_dump_counter(0),
|
||||
m_proof(m)
|
||||
{
|
||||
// -- insert m_pred->true background assumption this will not
|
||||
// -- change m_context, but will add m_pred to
|
||||
// -- the private field solver_na2as::m_assumptions
|
||||
if (m_virtual)
|
||||
{ solver_na2as::assert_expr(m.mk_true(), m_pred); }
|
||||
}
|
||||
|
||||
virtual_solver::~virtual_solver()
|
||||
{
|
||||
SASSERT(!m_pushed || get_scope_level() > 0);
|
||||
if (m_pushed) { pop(get_scope_level()); }
|
||||
|
||||
if (m_virtual) {
|
||||
m_pred = m.mk_not(m_pred);
|
||||
m_context.assert_expr(m_pred);
|
||||
}
|
||||
}
|
||||
|
||||
namespace {
|
||||
static bool matches_fact(expr_ref_vector &args, expr* &match)
|
||||
{
|
||||
ast_manager &m = args.get_manager();
|
||||
expr *fact = args.back();
|
||||
for (unsigned i = 0, sz = args.size() - 1; i < sz; ++i) {
|
||||
expr *arg = args.get(i);
|
||||
if (m.is_proof(arg) &&
|
||||
m.has_fact(to_app(arg)) &&
|
||||
m.get_fact(to_app(arg)) == fact) {
|
||||
match = arg;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
class elim_aux_assertions {
|
||||
app_ref m_aux;
|
||||
public:
|
||||
elim_aux_assertions(app_ref aux) : m_aux(aux) {}
|
||||
|
||||
void mk_or_core(expr_ref_vector &args, expr_ref &res)
|
||||
{
|
||||
ast_manager &m = args.get_manager();
|
||||
unsigned j = 0;
|
||||
for (unsigned i = 0, sz = args.size(); i < sz; ++i) {
|
||||
if (m.is_false(args.get(i))) { continue; }
|
||||
if (i != j) { args [j] = args.get(i); }
|
||||
++j;
|
||||
}
|
||||
SASSERT(j >= 1);
|
||||
res = j > 1 ? m.mk_or(j, args.c_ptr()) : args.get(0);
|
||||
}
|
||||
|
||||
void mk_app(func_decl *decl, expr_ref_vector &args, expr_ref &res)
|
||||
{
|
||||
ast_manager &m = args.get_manager();
|
||||
bool_rewriter brwr(m);
|
||||
|
||||
if (m.is_or(decl))
|
||||
{ mk_or_core(args, res); }
|
||||
else if (m.is_iff(decl) && args.size() == 2)
|
||||
// avoiding simplifying equalities. In particular,
|
||||
// we don't want (= (not a) (not b)) to be reduced to (= a b)
|
||||
{ res = m.mk_iff(args.get(0), args.get(1)); }
|
||||
else
|
||||
{ brwr.mk_app(decl, args.size(), args.c_ptr(), res); }
|
||||
}
|
||||
|
||||
void operator()(ast_manager &m, proof *pr, proof_ref &res)
|
||||
{
|
||||
DEBUG_CODE(proof_checker pc(m);
|
||||
expr_ref_vector side(m);
|
||||
SASSERT(pc.check(pr, side));
|
||||
);
|
||||
obj_map<app, app*> cache;
|
||||
bool_rewriter brwr(m);
|
||||
|
||||
// for reference counting of new proofs
|
||||
app_ref_vector pinned(m);
|
||||
|
||||
ptr_vector<app> todo;
|
||||
todo.push_back(pr);
|
||||
|
||||
expr_ref not_aux(m);
|
||||
not_aux = m.mk_not(m_aux);
|
||||
|
||||
expr_ref_vector args(m);
|
||||
|
||||
while (!todo.empty()) {
|
||||
app *p, *r;
|
||||
expr *a;
|
||||
|
||||
p = todo.back();
|
||||
if (cache.find(pr, r)) {
|
||||
todo.pop_back();
|
||||
continue;
|
||||
}
|
||||
|
||||
SASSERT(!todo.empty() || pr == p);
|
||||
bool dirty = false;
|
||||
unsigned todo_sz = todo.size();
|
||||
args.reset();
|
||||
for (unsigned i = 0, sz = p->get_num_args(); i < sz; ++i) {
|
||||
expr* arg = p->get_arg(i);
|
||||
if (arg == m_aux.get()) {
|
||||
dirty = true;
|
||||
args.push_back(m.mk_true());
|
||||
} else if (arg == not_aux.get()) {
|
||||
dirty = true;
|
||||
args.push_back(m.mk_false());
|
||||
}
|
||||
// skip (asserted m_aux)
|
||||
else if (m.is_asserted(arg, a) && a == m_aux.get()) {
|
||||
dirty = true;
|
||||
}
|
||||
// skip (hypothesis m_aux)
|
||||
else if (m.is_hypothesis(arg, a) && a == m_aux.get()) {
|
||||
dirty = true;
|
||||
} else if (is_app(arg) && cache.find(to_app(arg), r)) {
|
||||
dirty |= (arg != r);
|
||||
args.push_back(r);
|
||||
} else if (is_app(arg))
|
||||
{ todo.push_back(to_app(arg)); }
|
||||
else
|
||||
// -- not an app
|
||||
{ args.push_back(arg); }
|
||||
|
||||
}
|
||||
if (todo_sz < todo.size()) {
|
||||
// -- process parents
|
||||
args.reset();
|
||||
continue;
|
||||
}
|
||||
|
||||
// ready to re-create
|
||||
app_ref newp(m);
|
||||
if (!dirty) { newp = p; }
|
||||
else if (m.is_unit_resolution(p)) {
|
||||
if (args.size() == 2)
|
||||
// unit resolution with m_aux that got collapsed to nothing
|
||||
{ newp = to_app(args.get(0)); }
|
||||
else {
|
||||
ptr_vector<proof> parents;
|
||||
for (unsigned i = 0, sz = args.size() - 1; i < sz; ++i)
|
||||
{ parents.push_back(to_app(args.get(i))); }
|
||||
SASSERT(parents.size() == args.size() - 1);
|
||||
newp = m.mk_unit_resolution(parents.size(), parents.c_ptr());
|
||||
// XXX the old and new facts should be
|
||||
// equivalent. The test here is much
|
||||
// stronger. It might need to be relaxed.
|
||||
SASSERT(m.get_fact(newp) == args.back());
|
||||
pinned.push_back(newp);
|
||||
}
|
||||
} else if (matches_fact(args, a)) {
|
||||
newp = to_app(a);
|
||||
} else {
|
||||
expr_ref papp(m);
|
||||
mk_app(p->get_decl(), args, papp);
|
||||
newp = to_app(papp.get());
|
||||
pinned.push_back(newp);
|
||||
}
|
||||
cache.insert(p, newp);
|
||||
todo.pop_back();
|
||||
CTRACE("virtual",
|
||||
p->get_decl_kind() == PR_TH_LEMMA &&
|
||||
p->get_decl()->get_parameter(0).get_symbol() == "arith" &&
|
||||
p->get_decl()->get_num_parameters() > 1 &&
|
||||
p->get_decl()->get_parameter(1).get_symbol() == "farkas",
|
||||
tout << "Old pf: " << mk_pp(p, m) << "\n"
|
||||
<< "New pf: " << mk_pp(newp, m) << "\n";);
|
||||
}
|
||||
|
||||
proof *r;
|
||||
VERIFY(cache.find(pr, r));
|
||||
|
||||
DEBUG_CODE(
|
||||
proof_checker pc(m);
|
||||
expr_ref_vector side(m);
|
||||
SASSERT(pc.check(r, side));
|
||||
);
|
||||
|
||||
res = r ;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
proof *virtual_solver::get_proof()
|
||||
{
|
||||
scoped_watch _t_(m_factory.m_proof_watch);
|
||||
|
||||
if (!m_proof.get()) {
|
||||
elim_aux_assertions pc(m_pred);
|
||||
m_proof = m_context.get_proof();
|
||||
pc(m, m_proof.get(), m_proof);
|
||||
}
|
||||
return m_proof.get();
|
||||
}
|
||||
|
||||
bool virtual_solver::is_aux_predicate(expr *p)
|
||||
{return is_app(p) && to_app(p) == m_pred.get();}
|
||||
|
||||
lbool virtual_solver::check_sat_core(unsigned num_assumptions,
|
||||
expr *const * assumptions)
|
||||
{
|
||||
SASSERT(!m_pushed || get_scope_level() > 0);
|
||||
m_proof.reset();
|
||||
scoped_watch _t_(m_factory.m_check_watch);
|
||||
m_factory.m_stats.m_num_smt_checks++;
|
||||
|
||||
stopwatch sw;
|
||||
sw.start();
|
||||
internalize_assertions();
|
||||
if (false) {
|
||||
std::stringstream file_name;
|
||||
file_name << "virt_solver";
|
||||
if (m_virtual) { file_name << "_" << m_pred->get_decl()->get_name(); }
|
||||
file_name << "_" << (m_dump_counter++) << ".smt2";
|
||||
|
||||
verbose_stream() << "Dumping SMT2 benchmark: " << file_name.str() << "\n";
|
||||
|
||||
std::ofstream out(file_name.str().c_str());
|
||||
|
||||
to_smt2_benchmark(out, m_context, num_assumptions, assumptions,
|
||||
"virt_solver");
|
||||
|
||||
out << "(exit)\n";
|
||||
out.close();
|
||||
}
|
||||
lbool res = m_context.check(num_assumptions, assumptions);
|
||||
sw.stop();
|
||||
if (res == l_true) {
|
||||
m_factory.m_check_sat_watch.add(sw);
|
||||
m_factory.m_stats.m_num_sat_smt_checks++;
|
||||
} else if (res == l_undef) {
|
||||
m_factory.m_check_undef_watch.add(sw);
|
||||
m_factory.m_stats.m_num_undef_smt_checks++;
|
||||
}
|
||||
set_status(res);
|
||||
|
||||
if (m_dump_benchmarks &&
|
||||
sw.get_seconds() >= m_factory.fparams().m_dump_min_time) {
|
||||
std::stringstream file_name;
|
||||
file_name << "virt_solver";
|
||||
if (m_virtual) { file_name << "_" << m_pred->get_decl()->get_name(); }
|
||||
file_name << "_" << (m_dump_counter++) << ".smt2";
|
||||
|
||||
std::ofstream out(file_name.str().c_str());
|
||||
|
||||
|
||||
out << "(set-info :status ";
|
||||
if (res == l_true) { out << "sat"; }
|
||||
else if (res == l_false) { out << "unsat"; }
|
||||
else { out << "unknown"; }
|
||||
out << ")\n";
|
||||
|
||||
to_smt2_benchmark(out, m_context, num_assumptions, assumptions,
|
||||
"virt_solver");
|
||||
|
||||
out << "(exit)\n";
|
||||
::statistics st;
|
||||
m_context.collect_statistics(st);
|
||||
st.update("time", sw.get_seconds());
|
||||
st.display_smt2(out);
|
||||
|
||||
out.close();
|
||||
|
||||
if (m_factory.fparams().m_dump_recheck) {
|
||||
scoped_no_proof _no_proof_(m);
|
||||
smt_params p;
|
||||
stopwatch sw2;
|
||||
smt::kernel kernel(m, p);
|
||||
for (unsigned i = 0, sz = m_context.size(); i < sz; ++i)
|
||||
{ kernel.assert_expr(m_context.get_formulas()[i]); }
|
||||
sw2.start();
|
||||
kernel.check(num_assumptions, assumptions);
|
||||
sw2.stop();
|
||||
verbose_stream() << file_name.str() << " :orig "
|
||||
<< sw.get_seconds() << " :new " << sw2.get_seconds();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
void virtual_solver::push_core()
|
||||
{
|
||||
SASSERT(!m_pushed || get_scope_level() > 0);
|
||||
if (m_in_delay_scope) {
|
||||
// second push
|
||||
internalize_assertions();
|
||||
m_context.push();
|
||||
m_pushed = true;
|
||||
m_in_delay_scope = false;
|
||||
}
|
||||
|
||||
if (!m_pushed) { m_in_delay_scope = true; }
|
||||
else {
|
||||
SASSERT(m_pushed);
|
||||
SASSERT(!m_in_delay_scope);
|
||||
m_context.push();
|
||||
}
|
||||
}
|
||||
void virtual_solver::pop_core(unsigned n)
|
||||
{
|
||||
SASSERT(!m_pushed || get_scope_level() > 0);
|
||||
if (m_pushed) {
|
||||
SASSERT(!m_in_delay_scope);
|
||||
m_context.pop(n);
|
||||
m_pushed = get_scope_level() - n > 0;
|
||||
} else
|
||||
{ m_in_delay_scope = get_scope_level() - n > 0; }
|
||||
}
|
||||
|
||||
void virtual_solver::get_unsat_core(ptr_vector<expr> &r)
|
||||
{
|
||||
for (unsigned i = 0, sz = m_context.get_unsat_core_size(); i < sz; ++i) {
|
||||
expr *core = m_context.get_unsat_core_expr(i);
|
||||
if (is_aux_predicate(core)) { continue; }
|
||||
r.push_back(core);
|
||||
}
|
||||
}
|
||||
|
||||
void virtual_solver::assert_expr(expr *e)
|
||||
{
|
||||
SASSERT(!m_pushed || get_scope_level() > 0);
|
||||
if (m.is_true(e)) { return; }
|
||||
if (m_in_delay_scope) {
|
||||
internalize_assertions();
|
||||
m_context.push();
|
||||
m_pushed = true;
|
||||
m_in_delay_scope = false;
|
||||
}
|
||||
|
||||
if (m_pushed)
|
||||
{ m_context.assert_expr(e); }
|
||||
else {
|
||||
m_flat.push_back(e);
|
||||
flatten_and(m_flat);
|
||||
m_assertions.append(m_flat);
|
||||
m_flat.reset();
|
||||
}
|
||||
}
|
||||
void virtual_solver::internalize_assertions()
|
||||
{
|
||||
SASSERT(!m_pushed || m_head == m_assertions.size());
|
||||
for (unsigned sz = m_assertions.size(); m_head < sz; ++m_head) {
|
||||
expr_ref f(m);
|
||||
f = m.mk_implies(m_pred, (m_assertions.get(m_head)));
|
||||
m_context.assert_expr(f);
|
||||
}
|
||||
}
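
Editor's note: to summarize the mechanism, each virtual solver owns a fresh boolean predicate; its assertions enter the shared smt::kernel only as implications guarded by that predicate (as internalize_assertions above shows), check-sat passes the predicate as an assumption so that only the owning solver's assertions become active, and the destructor retires the solver by asserting the negated predicate. A minimal stand-alone illustration of this trick with the public z3++ API (assumed available as z3++.h; not the spacer classes):

#include <z3++.h>
#include <iostream>
using namespace z3;

int main() {
    context c;
    solver shared(c);                          // one physical solver shared by two "virtual" solvers
    expr x  = c.int_const("x");
    expr p0 = c.bool_const("vsolver#0");
    expr p1 = c.bool_const("vsolver#1");

    shared.add(implies(p0, x > 0));            // assertion owned by virtual solver 0
    shared.add(implies(p1, x < 0));            // assertion owned by virtual solver 1

    expr_vector a0(c); a0.push_back(p0);
    expr_vector a1(c); a1.push_back(p1);
    std::cout << shared.check(a0) << "\n";     // sat: only solver 0's assertions are active
    std::cout << shared.check(a1) << "\n";     // sat: only solver 1's assertions are active

    expr_vector both(c); both.push_back(p0); both.push_back(p1);
    std::cout << shared.check(both) << "\n";   // unsat: x > 0 and x < 0 cannot hold together

    shared.add(!p1);                           // retire virtual solver 1, as the destructor does
    std::cout << shared.check(a1) << "\n";     // unsat: solver 1 can no longer be used
}
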
|
||||
void virtual_solver::refresh()
|
||||
{
|
||||
SASSERT(!m_pushed);
|
||||
m_head = 0;
|
||||
}
|
||||
|
||||
void virtual_solver::reset()
|
||||
{
|
||||
SASSERT(!m_pushed);
|
||||
m_head = 0;
|
||||
m_assertions.reset();
|
||||
m_factory.refresh();
|
||||
}
|
||||
|
||||
void virtual_solver::get_labels(svector<symbol> &r)
|
||||
{
|
||||
r.reset();
|
||||
buffer<symbol> tmp;
|
||||
m_context.get_relevant_labels(0, tmp);
|
||||
r.append(tmp.size(), tmp.c_ptr());
|
||||
}
|
||||
|
||||
solver* virtual_solver::translate(ast_manager& m, params_ref const& p)
|
||||
{
|
||||
UNREACHABLE();
|
||||
return 0;
|
||||
}
|
||||
void virtual_solver::updt_params(params_ref const &p)
|
||||
{ m_factory.updt_params(p); }
|
||||
void virtual_solver::collect_param_descrs(param_descrs &r)
|
||||
{ m_factory.collect_param_descrs(r); }
|
||||
void virtual_solver::set_produce_models(bool f)
|
||||
{ m_factory.set_produce_models(f); }
|
||||
bool virtual_solver::get_produce_models()
|
||||
{return m_factory.get_produce_models(); }
|
||||
smt_params &virtual_solver::fparams()
|
||||
{return m_factory.fparams();}
|
||||
|
||||
void virtual_solver::to_smt2_benchmark(std::ostream &out,
|
||||
smt::kernel &context,
|
||||
unsigned num_assumptions,
|
||||
expr * const * assumptions,
|
||||
char const * name,
|
||||
symbol const &logic,
|
||||
char const * status,
|
||||
char const * attributes)
|
||||
{
|
||||
ast_pp_util pp(m);
|
||||
expr_ref_vector asserts(m);
|
||||
|
||||
|
||||
for (unsigned i = 0, sz = context.size(); i < sz; ++i) {
|
||||
asserts.push_back(context.get_formulas()[i]);
|
||||
pp.collect(asserts.back());
|
||||
}
|
||||
pp.collect(num_assumptions, assumptions);
|
||||
pp.display_decls(out);
|
||||
pp.display_asserts(out, asserts);
|
||||
out << "(check-sat ";
|
||||
for (unsigned i = 0; i < num_assumptions; ++i)
|
||||
{ out << mk_pp(assumptions[i], m) << " "; }
|
||||
out << ")\n";
|
||||
}
|
||||
|
||||
|
||||
virtual_solver_factory::virtual_solver_factory(ast_manager &mgr, smt_params &fparams) :
|
||||
m_fparams(fparams), m(mgr), m_context(m, m_fparams)
|
||||
{
|
||||
m_stats.reset();
|
||||
}
|
||||
|
||||
virtual_solver* virtual_solver_factory::mk_solver()
|
||||
{
|
||||
std::stringstream name;
|
||||
name << "vsolver#" << m_solvers.size();
|
||||
app_ref pred(m);
|
||||
pred = m.mk_const(symbol(name.str().c_str()), m.mk_bool_sort());
|
||||
SASSERT(m_context.get_scope_level() == 0);
|
||||
m_solvers.push_back(alloc(virtual_solver, *this, m_context, pred));
|
||||
return m_solvers.back();
|
||||
}
|
||||
|
||||
void virtual_solver_factory::collect_statistics(statistics &st) const
|
||||
{
|
||||
m_context.collect_statistics(st);
|
||||
st.update("time.virtual_solver.smt.total", m_check_watch.get_seconds());
|
||||
st.update("time.virtual_solver.smt.total.sat", m_check_sat_watch.get_seconds());
|
||||
st.update("time.virtual_solver.smt.total.undef", m_check_undef_watch.get_seconds());
|
||||
st.update("time.virtual_solver.proof", m_proof_watch.get_seconds());
|
||||
st.update("virtual_solver.checks", m_stats.m_num_smt_checks);
|
||||
st.update("virtual_solver.checks.sat", m_stats.m_num_sat_smt_checks);
|
||||
st.update("virtual_solver.checks.undef", m_stats.m_num_undef_smt_checks);
|
||||
}
|
||||
void virtual_solver_factory::reset_statistics()
|
||||
{
|
||||
m_context.reset_statistics();
|
||||
m_stats.reset();
|
||||
m_check_sat_watch.reset();
|
||||
m_check_undef_watch.reset();
|
||||
m_check_watch.reset();
|
||||
m_proof_watch.reset();
|
||||
}
|
||||
|
||||
void virtual_solver_factory::refresh()
|
||||
{
|
||||
m_context.reset();
|
||||
for (unsigned i = 0, e = m_solvers.size(); i < e; ++i)
|
||||
{ m_solvers [i]->refresh(); }
|
||||
}
|
||||
|
||||
virtual_solver_factory::~virtual_solver_factory()
|
||||
{
|
||||
for (unsigned i = 0, e = m_solvers.size(); i < e; ++i)
|
||||
{ dealloc(m_solvers [i]); }
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
153  src/muz/spacer/spacer_virtual_solver.h  Normal file
|
@ -0,0 +1,153 @@
|
|||
/**
|
||||
Copyright (c) 2017 Arie Gurfinkel
|
||||
|
||||
Module Name:
|
||||
|
||||
spacer_virtual_solver.h
|
||||
|
||||
Abstract:
|
||||
|
||||
multi-solver view of a single smt::kernel
|
||||
|
||||
Author:
|
||||
|
||||
Arie Gurfinkel
|
||||
|
||||
Notes:
|
||||
|
||||
--*/
|
||||
#ifndef SPACER_VIRTUAL_SOLVER_H_
|
||||
#define SPACER_VIRTUAL_SOLVER_H_
|
||||
#include"ast.h"
|
||||
#include"params.h"
|
||||
#include"solver_na2as.h"
|
||||
#include"smt_kernel.h"
|
||||
#include"smt_params.h"
|
||||
#include"stopwatch.h"
|
||||
namespace spacer {
|
||||
class virtual_solver_factory;
|
||||
|
||||
class virtual_solver : public solver_na2as {
|
||||
friend class virtual_solver_factory;
|
||||
|
||||
private:
|
||||
virtual_solver_factory &m_factory;
|
||||
ast_manager &m;
|
||||
smt::kernel &m_context;
|
||||
app_ref m_pred;
|
||||
|
||||
bool m_virtual;
|
||||
expr_ref_vector m_assertions;
|
||||
unsigned m_head;
|
||||
// temporary to flatten conjunction
|
||||
expr_ref_vector m_flat;
|
||||
|
||||
bool m_pushed;
|
||||
bool m_in_delay_scope;
|
||||
bool m_dump_benchmarks;
|
||||
unsigned m_dump_counter;
|
||||
|
||||
proof_ref m_proof;
|
||||
|
||||
virtual_solver(virtual_solver_factory &factory, smt::kernel &context, app* pred);
|
||||
|
||||
bool is_aux_predicate(expr *p);
|
||||
void internalize_assertions();
|
||||
void to_smt2_benchmark(std::ostream &out,
|
||||
smt::kernel &context,
|
||||
unsigned num_assumptions,
|
||||
expr * const * assumptions,
|
||||
char const * name = "benchmarks",
|
||||
symbol const &logic = symbol::null,
|
||||
char const * status = "unknown",
|
||||
char const * attributes = "");
|
||||
|
||||
void refresh();
|
||||
|
||||
public:
|
||||
virtual ~virtual_solver();
|
||||
virtual unsigned get_num_assumptions() const
|
||||
{
|
||||
unsigned sz = solver_na2as::get_num_assumptions();
|
||||
return m_virtual ? sz - 1 : sz;
|
||||
}
|
||||
virtual expr* get_assumption(unsigned idx) const
|
||||
{
|
||||
if(m_virtual) { idx++; }
|
||||
return solver_na2as::get_assumption(idx);
|
||||
}
|
||||
|
||||
virtual void get_unsat_core(ptr_vector<expr> &r);
|
||||
virtual void assert_expr(expr *e);
|
||||
virtual void collect_statistics(statistics &st) const {}
|
||||
virtual void get_model(model_ref &m) {m_context.get_model(m);}
|
||||
virtual proof* get_proof();
|
||||
virtual std::string reason_unknown() const
|
||||
{return m_context.last_failure_as_string();}
|
||||
virtual void set_reason_unknown(char const *msg)
|
||||
{m_context.set_reason_unknown(msg);}
|
||||
virtual ast_manager& get_manager() const {return m;}
|
||||
virtual void get_labels(svector<symbol> &r);
|
||||
virtual void set_produce_models(bool f);
|
||||
virtual bool get_produce_models();
|
||||
virtual smt_params &fparams();
|
||||
virtual void reset();
|
||||
|
||||
virtual void set_progress_callback(progress_callback *callback)
|
||||
{UNREACHABLE();}
|
||||
|
||||
virtual solver *translate(ast_manager &m, params_ref const &p);
|
||||
|
||||
virtual void updt_params(params_ref const &p);
|
||||
virtual void collect_param_descrs(param_descrs &r);
|
||||
|
||||
|
||||
protected:
|
||||
virtual lbool check_sat_core(unsigned num_assumptions, expr *const * assumptions);
|
||||
virtual void push_core();
|
||||
virtual void pop_core(unsigned n);
|
||||
};
|
||||
|
||||
/// multi-solver abstraction on top of a single smt::kernel
|
||||
class virtual_solver_factory {
|
||||
friend class virtual_solver;
|
||||
private:
|
||||
smt_params &m_fparams;
|
||||
ast_manager &m;
|
||||
smt::kernel m_context;
|
||||
/// solvers managed by this factory
|
||||
ptr_vector<virtual_solver> m_solvers;
|
||||
|
||||
struct stats {
|
||||
unsigned m_num_smt_checks;
|
||||
unsigned m_num_sat_smt_checks;
|
||||
unsigned m_num_undef_smt_checks;
|
||||
stats() { reset(); }
|
||||
void reset() { memset(this, 0, sizeof(*this)); }
|
||||
};
|
||||
|
||||
stats m_stats;
|
||||
stopwatch m_check_watch;
|
||||
stopwatch m_check_sat_watch;
|
||||
stopwatch m_check_undef_watch;
|
||||
stopwatch m_proof_watch;
|
||||
|
||||
|
||||
void refresh();
|
||||
public:
|
||||
virtual_solver_factory(ast_manager &mgr, smt_params &fparams);
|
||||
virtual ~virtual_solver_factory();
|
||||
virtual_solver* mk_solver();
|
||||
void collect_statistics(statistics &st) const;
|
||||
void reset_statistics();
|
||||
void updt_params(params_ref const &p) { m_fparams.updt_params(p); }
|
||||
void collect_param_descrs(param_descrs &r) { /* empty */ }
|
||||
void set_produce_models(bool f) { m_fparams.m_model = f; }
|
||||
bool get_produce_models() { return m_fparams.m_model; }
|
||||
smt_params &fparams() { return m_fparams; }
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
|
||||
#endif
|
|
@ -21,6 +21,8 @@ z3_add_component(transforms
|
|||
dl_mk_unbound_compressor.cpp
|
||||
dl_mk_unfold.cpp
|
||||
dl_transforms.cpp
|
||||
dl_mk_array_eq_rewrite.cpp
|
||||
dl_mk_array_instantiation.cpp
|
||||
COMPONENT_DEPENDENCIES
|
||||
dataflow
|
||||
hilbert
|
||||
|
|
|
@ -319,6 +319,9 @@ namespace datalog {
|
|||
|
||||
rule_set * mk_array_blast::operator()(rule_set const & source) {
|
||||
|
||||
if (!m_ctx.array_blast ()) {
|
||||
return 0;
|
||||
}
|
||||
rule_set* rules = alloc(rule_set, m_ctx);
|
||||
rules->inherit_predicates(source);
|
||||
rule_set::iterator it = source.begin(), end = source.end();
|
||||
|
|
140  src/muz/transforms/dl_mk_array_eq_rewrite.cpp  Normal file
|
@ -0,0 +1,140 @@
|
|||
/*++
|
||||
|
||||
Module Name:
|
||||
|
||||
dl_mk_array_eq_rewrite.h
|
||||
|
||||
Abstract:
|
||||
Selects a representative for array equivalence classes.
|
||||
|
||||
Author:
|
||||
|
||||
Julien Braine
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
|
||||
#include "dl_mk_array_eq_rewrite.h"
|
||||
#include "dl_context.h"
|
||||
#include "pattern_inference.h"
|
||||
#include "dl_context.h"
|
||||
#include "expr_safe_replace.h"
|
||||
#include "expr_abstract.h"
|
||||
#include"fixedpoint_params.hpp"
|
||||
#include "../spacer/obj_equiv_class.h"
|
||||
|
||||
namespace datalog {
|
||||
|
||||
mk_array_eq_rewrite::mk_array_eq_rewrite(
|
||||
context & ctx, unsigned priority):
|
||||
plugin(priority),
|
||||
m(ctx.get_manager()),
|
||||
m_ctx(ctx),
|
||||
m_a(m)
|
||||
{
|
||||
}
|
||||
|
||||
rule_set * mk_array_eq_rewrite::operator()(rule_set const & source)
|
||||
{
|
||||
src_set = &source;
|
||||
rule_set * result = alloc(rule_set, m_ctx);
|
||||
result->inherit_predicates(source);
|
||||
dst=result;
|
||||
unsigned nbrules = source.get_num_rules();
|
||||
src_manager = &source.get_rule_manager();
|
||||
for(unsigned i =0;i<nbrules;i++)
|
||||
{
|
||||
rule & r = *source.get_rule(i);
|
||||
instantiate_rule(r, *result);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void mk_array_eq_rewrite::instantiate_rule(const rule& r, rule_set & dest)
|
||||
{
|
||||
//Reset everything
|
||||
cnt = src_manager->get_counter().get_max_rule_var(r)+1;
|
||||
|
||||
|
||||
expr_ref_vector new_tail(m);
|
||||
unsigned nb_predicates = r.get_uninterpreted_tail_size();
|
||||
unsigned tail_size = r.get_tail_size();
|
||||
for(unsigned i=0;i<nb_predicates;i++)
|
||||
{
|
||||
new_tail.push_back(r.get_tail(i));
|
||||
}
|
||||
|
||||
spacer::expr_equiv_class array_eq_classes(m);
|
||||
for(unsigned i=nb_predicates;i<tail_size;i++)
|
||||
{
|
||||
expr* cond = r.get_tail(i);
|
||||
expr* e1, *e2;
|
||||
if(m.is_eq(cond, e1, e2) && m_a.is_array(get_sort(e1)))
|
||||
{
|
||||
array_eq_classes.merge(e1, e2);
|
||||
}
|
||||
else
|
||||
{
|
||||
new_tail.push_back(cond);
|
||||
}
|
||||
}
|
||||
|
||||
for(spacer::expr_equiv_class::equiv_iterator c_eq = array_eq_classes.begin();
|
||||
c_eq != array_eq_classes.end();++c_eq)
|
||||
{
|
||||
expr* representative = *(*c_eq).begin();
|
||||
for(spacer::expr_equiv_class::iterator it = (*c_eq).begin();
|
||||
it!=(*c_eq).end(); ++it)
|
||||
{
|
||||
if(!is_var(*it))
|
||||
{
|
||||
representative = *it;
|
||||
break;
|
||||
}
|
||||
}
|
||||
for(spacer::expr_equiv_class::iterator it = (*c_eq).begin();
|
||||
it!=(*c_eq).end(); ++it)
|
||||
{
|
||||
for(unsigned i=0;i<new_tail.size();i++)
|
||||
new_tail[i] = replace(new_tail[i].get(), representative, *it);
|
||||
}
|
||||
for(spacer::expr_equiv_class::iterator it = (*c_eq).begin();
|
||||
it!=(*c_eq).end(); ++it)
|
||||
{
|
||||
new_tail.push_back(m.mk_eq(*it, representative));
|
||||
}
|
||||
}
|
||||
params_ref select_over_store;
|
||||
select_over_store.set_bool("expand_select_store", true);
|
||||
th_rewriter t(m, select_over_store);
|
||||
expr_ref_vector res_conjs(m);
|
||||
for(unsigned i=0;i<new_tail.size();i++)
|
||||
{
|
||||
expr_ref tmp(m);
|
||||
t(new_tail[i].get(), tmp);
|
||||
res_conjs.push_back(tmp);
|
||||
}
|
||||
proof_ref pr(m);
|
||||
src_manager->mk_rule(m.mk_implies(m.mk_and(res_conjs.size(), res_conjs.c_ptr()), r.get_head()), pr, dest, r.name());
|
||||
}
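
Editor's note: for concreteness (a schematic example, not taken from the patch), consider a clause

    P(a, b) /\ b = store(a, i, v) /\ select(b, j) > 0  =>  Q(j)

The array equality is collected into an equivalence class {b, store(a, i, v)}; the non-variable member store(a, i, v) is chosen as representative, the remaining literals are rewritten over it, and the defining equalities are re-added, giving

    P(a, store(a, i, v)) /\ select(store(a, i, v), j) > 0 /\ b = store(a, i, v)  =>  Q(j)

after which the th_rewriter pass with expand_select_store simplifies the select over the store.
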
|
||||
|
||||
expr* mk_array_eq_rewrite::replace(expr* e, expr* new_val, expr* old_val)
|
||||
{
|
||||
if(e==old_val)
|
||||
return new_val;
|
||||
else if(!is_app(e))
|
||||
{
|
||||
return e;
|
||||
}
|
||||
app*f = to_app(e);
|
||||
ptr_vector<expr> n_args;
|
||||
for(unsigned i=0;i<f->get_num_args();i++)
|
||||
{
|
||||
n_args.push_back(replace(f->get_arg(i), new_val, old_val));
|
||||
}
|
||||
return m.mk_app(f->get_decl(), n_args.size(), n_args.c_ptr());
|
||||
}
|
||||
|
||||
}
|
54  src/muz/transforms/dl_mk_array_eq_rewrite.h  Normal file
|
@ -0,0 +1,54 @@
|
|||
/*++
|
||||
|
||||
Module Name:
|
||||
|
||||
dl_mk_array_eq_rewrite.h
|
||||
|
||||
Abstract:
|
||||
Selects a representative for array equivalence classes.
|
||||
|
||||
Author:
|
||||
|
||||
Julien Braine
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef DL_MK_ARRAY_EQ_REWRITE_H_
|
||||
#define DL_MK_ARRAY_EQ_REWRITE_H_
|
||||
|
||||
|
||||
#include "dl_rule_transformer.h"
|
||||
#include "../spacer/obj_equiv_class.h"
|
||||
|
||||
namespace datalog {
|
||||
|
||||
class context;
|
||||
class mk_array_eq_rewrite : public rule_transformer::plugin {
|
||||
//Context objects
|
||||
ast_manager& m;
|
||||
context& m_ctx;
|
||||
array_util m_a;
|
||||
|
||||
//Rule set context
|
||||
const rule_set*src_set;
|
||||
rule_set*dst;
|
||||
rule_manager* src_manager;
|
||||
unsigned cnt;//Index for new variables
|
||||
|
||||
expr* replace(expr* e, expr* new_val, expr* old_val);
|
||||
void instantiate_rule(const rule& r, rule_set & dest);
|
||||
|
||||
public:
|
||||
mk_array_eq_rewrite(context & ctx, unsigned priority);
|
||||
rule_set * operator()(rule_set const & source);
|
||||
virtual ~mk_array_eq_rewrite(){}
|
||||
};
|
||||
|
||||
|
||||
|
||||
};
|
||||
|
||||
#endif /* DL_MK_ARRAY_EQ_REWRITE_H_ */
|
||||
|
324  src/muz/transforms/dl_mk_array_instantiation.cpp  Normal file
|
@ -0,0 +1,324 @@
|
|||
/*++
|
||||
|
||||
Module Name:
|
||||
|
||||
dl_mk_array_instantiation.h
|
||||
|
||||
Abstract:
|
||||
|
||||
Does array instantiation
|
||||
|
||||
Author:
|
||||
|
||||
Julien Braine
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
|
||||
#include "dl_mk_array_instantiation.h"
|
||||
#include "dl_context.h"
|
||||
#include "pattern_inference.h"
|
||||
#include "dl_context.h"
|
||||
#include "expr_safe_replace.h"
|
||||
#include "expr_abstract.h"
|
||||
#include"fixedpoint_params.hpp"
|
||||
#include "../spacer/obj_equiv_class.h"
|
||||
|
||||
namespace datalog {
|
||||
|
||||
mk_array_instantiation::mk_array_instantiation(
|
||||
context & ctx, unsigned priority):
|
||||
plugin(priority),
|
||||
m(ctx.get_manager()),
|
||||
m_ctx(ctx),
|
||||
m_a(m),
|
||||
eq_classes(m),
|
||||
ownership(m)
|
||||
{
|
||||
}
|
||||
|
||||
rule_set * mk_array_instantiation::operator()(rule_set const & source)
|
||||
{
|
||||
std::cout<<"Array Instantiation called with parameters :"
|
||||
<<" enforce="<<m_ctx.get_params().xform_instantiate_arrays_enforce()
|
||||
<<" nb_quantifier="<<m_ctx.get_params().xform_instantiate_arrays_nb_quantifier()
|
||||
<<" slice_technique="<<m_ctx.get_params().xform_instantiate_arrays_slice_technique()
|
||||
<<"\n";
|
||||
std::cout<<"Input rules = \n";
|
||||
source.display(std::cout);
|
||||
src_set = &source;
|
||||
rule_set * result = alloc(rule_set, m_ctx);
|
||||
dst=result;
|
||||
unsigned nbrules = source.get_num_rules();
|
||||
src_manager = &source.get_rule_manager();
|
||||
for(unsigned i =0;i<nbrules;i++)
|
||||
{
|
||||
rule & r = *source.get_rule(i);
|
||||
instantiate_rule(r, *result);
|
||||
}
|
||||
std::cout<<"\n\nOutput rules = \n";
|
||||
result->display(std::cout);
|
||||
return result;
|
||||
}
|
||||
|
||||
void mk_array_instantiation::instantiate_rule(const rule& r, rule_set & dest)
|
||||
{
|
||||
//Reset everything
|
||||
selects.reset();
|
||||
eq_classes.reset();
|
||||
cnt = src_manager->get_counter().get_max_rule_var(r)+1;
|
||||
done_selects.reset();
|
||||
ownership.reset();
|
||||
|
||||
expr_ref_vector phi(m);
|
||||
expr_ref_vector preds(m);
|
||||
expr_ref new_head = create_head(to_app(r.get_head()));
|
||||
unsigned nb_predicates = r.get_uninterpreted_tail_size();
|
||||
unsigned tail_size = r.get_tail_size();
|
||||
for(unsigned i=0;i<nb_predicates;i++)
|
||||
{
|
||||
preds.push_back(r.get_tail(i));
|
||||
}
|
||||
for(unsigned i=nb_predicates;i<tail_size;i++)
|
||||
{
|
||||
phi.push_back(r.get_tail(i));
|
||||
}
|
||||
|
||||
//Retrieve selects
|
||||
for(unsigned i=0;i<phi.size();i++)
|
||||
retrieve_selects(phi[i].get());
|
||||
|
||||
//Rewrite the predicates
|
||||
expr_ref_vector new_tail(m);
|
||||
for(unsigned i=0;i<preds.size();i++)
|
||||
{
|
||||
new_tail.append(instantiate_pred(to_app(preds[i].get())));
|
||||
}
|
||||
new_tail.append(phi);
|
||||
for(obj_map<expr, var*>::iterator it = done_selects.begin(); it!=done_selects.end(); ++it)
|
||||
{
|
||||
expr_ref tmp(m);
|
||||
tmp = &it->get_key();
|
||||
new_tail.push_back(m.mk_eq(it->get_value(), tmp));
|
||||
}
|
||||
proof_ref pr(m);
|
||||
src_manager->mk_rule(m.mk_implies(m.mk_and(new_tail.size(), new_tail.c_ptr()), new_head), pr, dest, r.name());
|
||||
}
|
||||
|
||||
expr_ref mk_array_instantiation::create_head(app* old_head)
|
||||
{
|
||||
expr_ref_vector new_args(m);
|
||||
for(unsigned i=0;i<old_head->get_num_args();i++)
|
||||
{
|
||||
expr*arg = old_head->get_arg(i);
|
||||
if(m_a.is_array(get_sort(arg)))
|
||||
{
|
||||
for(unsigned k=0; k< m_ctx.get_params().xform_instantiate_arrays_nb_quantifier();k++)
|
||||
{
|
||||
expr_ref_vector dummy_args(m);
|
||||
dummy_args.push_back(arg);
|
||||
for(unsigned i=0;i<get_array_arity(get_sort(arg));i++)
|
||||
{
|
||||
dummy_args.push_back(m.mk_var(cnt, get_array_domain(get_sort(arg), i)));
|
||||
cnt++;
|
||||
}
|
||||
expr_ref select(m);
|
||||
select = m_a.mk_select(dummy_args.size(), dummy_args.c_ptr());
|
||||
new_args.push_back(select);
|
||||
selects.insert_if_not_there(arg, ptr_vector<expr>());
|
||||
selects[arg].push_back(select);
|
||||
}
|
||||
if(!m_ctx.get_params().xform_instantiate_arrays_enforce())
|
||||
new_args.push_back(arg);
|
||||
}
|
||||
else
|
||||
new_args.push_back(arg);
|
||||
}
|
||||
return create_pred(old_head, new_args);
|
||||
}
|
||||
|
||||
|
||||
void mk_array_instantiation::retrieve_selects(expr* e)
|
||||
{
|
||||
//If the expression is not a function application, we ignore it
|
||||
if (!is_app(e)) {
|
||||
return;
|
||||
}
|
||||
app*f=to_app(e);
|
||||
//Call the function recursively on all arguments
|
||||
unsigned nbargs = f->get_num_args();
|
||||
for(unsigned i=0;i<nbargs;i++)
|
||||
{
|
||||
retrieve_selects(f->get_arg(i));
|
||||
}
|
||||
//If it is a select, then add it to selects
|
||||
if(m_a.is_select(f))
|
||||
{
|
||||
SASSERT(!m_a.is_array(get_sort(e)));
|
||||
selects.insert_if_not_there(f->get_arg(0), ptr_vector<expr>());
|
||||
selects[f->get_arg(0)].push_back(e);
|
||||
}
|
||||
    // If it relates arrays, for example a store relates its result to the array it writes into, then merge them in the equiv_classes
|
||||
if(m_a.is_store(f))
|
||||
{
|
||||
eq_classes.merge(e, f->get_arg(0));
|
||||
}
|
||||
else if(m.is_eq(f) && m_a.is_array(get_sort(f->get_arg(0))))
|
||||
{
|
||||
eq_classes.merge(f->get_arg(0), f->get_arg(1));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
expr_ref_vector mk_array_instantiation::getId(app*old_pred, const expr_ref_vector& n_args)
|
||||
{
|
||||
expr_ref_vector res(m);
|
||||
for(unsigned i=0;i<n_args.size(); i++)
|
||||
{
|
||||
if(m_a.is_select(n_args[i]))
|
||||
{
|
||||
app*select = to_app(n_args[i]);
|
||||
for(unsigned j=1;j<select->get_num_args();j++)
|
||||
{
|
||||
res.push_back(select->get_arg(j));
|
||||
}
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
expr_ref mk_array_instantiation::create_pred(app*old_pred, expr_ref_vector& n_args)
|
||||
{
|
||||
expr_ref_vector new_args(m);
|
||||
new_args.append(n_args);
|
||||
new_args.append(getId(old_pred, n_args));
|
||||
for(unsigned i=0;i<new_args.size();i++)
|
||||
{
|
||||
if(m_a.is_select(new_args[i].get()))
|
||||
{
|
||||
new_args[i] = mk_select_var(new_args[i].get());
|
||||
}
|
||||
}
|
||||
sort_ref_vector new_sorts(m);
|
||||
for(unsigned i=0;i<new_args.size();i++)
|
||||
new_sorts.push_back(get_sort(new_args[i].get()));
|
||||
expr_ref res(m);
|
||||
func_decl_ref fun_decl(m);
|
||||
fun_decl = m.mk_func_decl(symbol((old_pred->get_decl()->get_name().str()+"!inst").c_str()), new_sorts.size(), new_sorts.c_ptr(), old_pred->get_decl()->get_range());
|
||||
m_ctx.register_predicate(fun_decl, false);
|
||||
if(src_set->is_output_predicate(old_pred->get_decl()))
|
||||
dst->set_output_predicate(fun_decl);
|
||||
res=m.mk_app(fun_decl,new_args.size(), new_args.c_ptr());
|
||||
return res;
|
||||
}
|
||||
|
||||
var * mk_array_instantiation::mk_select_var(expr* select)
|
||||
{
|
||||
var*result;
|
||||
if(!done_selects.find(select, result))
|
||||
{
|
||||
ownership.push_back(select);
|
||||
result = m.mk_var(cnt, get_sort(select));
|
||||
cnt++;
|
||||
done_selects.insert(select, result);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
expr_ref mk_array_instantiation::rewrite_select(expr*array, expr*select)
|
||||
{
|
||||
app*s = to_app(select);
|
||||
expr_ref res(m);
|
||||
expr_ref_vector args(m);
|
||||
args.push_back(array);
|
||||
for(unsigned i=1; i<s->get_num_args();i++)
|
||||
{
|
||||
args.push_back(s->get_arg(i));
|
||||
}
|
||||
res = m_a.mk_select(args.size(), args.c_ptr());
|
||||
return res;
|
||||
}
|
||||
|
||||
expr_ref_vector mk_array_instantiation::retrieve_all_selects(expr*array)
|
||||
{
|
||||
expr_ref_vector all_selects(m);
|
||||
for(spacer::expr_equiv_class::iterator it = eq_classes.begin(array);
|
||||
it != eq_classes.end(array); ++it)
|
||||
{
|
||||
selects.insert_if_not_there(*it, ptr_vector<expr>());
|
||||
ptr_vector<expr>& select_ops = selects[*it];
|
||||
for(unsigned i=0;i<select_ops.size();i++)
|
||||
{
|
||||
all_selects.push_back(rewrite_select(array, select_ops[i]));
|
||||
}
|
||||
}
|
||||
if(all_selects.size()==0)
|
||||
{
|
||||
expr_ref_vector dummy_args(m);
|
||||
dummy_args.push_back(array);
|
||||
for(unsigned i=0;i<get_array_arity(get_sort(array));i++)
|
||||
{
|
||||
dummy_args.push_back(m.mk_var(cnt, get_array_domain(get_sort(array), i)));
|
||||
cnt++;
|
||||
}
|
||||
all_selects.push_back(m_a.mk_select(dummy_args.size(), dummy_args.c_ptr()));
|
||||
}
|
||||
return all_selects;
|
||||
}
|
||||
|
||||
|
||||
expr_ref_vector mk_array_instantiation::instantiate_pred(app*old_pred)
|
||||
{
|
||||
|
||||
unsigned nb_old_args=old_pred->get_num_args();
|
||||
    // Stores, for each old argument position, the list of possible new arguments
|
||||
vector<expr_ref_vector> arg_correspondance;
|
||||
for(unsigned i=0;i<nb_old_args;i++)
|
||||
{
|
||||
expr_ref arg(old_pred->get_arg(i), m);
|
||||
if(m_a.is_array(get_sort(arg)))
|
||||
{
|
||||
vector<expr_ref_vector> arg_possibilities(m_ctx.get_params().xform_instantiate_arrays_nb_quantifier(), retrieve_all_selects(arg));
|
||||
arg_correspondance.append(arg_possibilities);
|
||||
if(!m_ctx.get_params().xform_instantiate_arrays_enforce())
|
||||
{
|
||||
expr_ref_vector tmp(m);
|
||||
tmp.push_back(arg);
|
||||
arg_correspondance.push_back(tmp);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
expr_ref_vector tmp(m);
|
||||
tmp.push_back(arg);
|
||||
arg_correspondance.push_back(tmp);
|
||||
}
|
||||
}
|
||||
//Now, we need to deal with every combination
|
||||
|
||||
expr_ref_vector res(m);
|
||||
|
||||
svector<unsigned> chosen(arg_correspondance.size(), 0u);
|
||||
while(1)
|
||||
{
|
||||
expr_ref_vector new_args(m);
|
||||
for(unsigned i=0;i<chosen.size();i++)
|
||||
{
|
||||
new_args.push_back(arg_correspondance[i][chosen[i]].get());
|
||||
}
|
||||
res.push_back(create_pred(old_pred, new_args));
|
||||
unsigned pos=-1;
|
||||
do
|
||||
{
|
||||
pos++;
|
||||
if(pos==chosen.size())
|
||||
{
|
||||
return res;
|
||||
}
|
||||
}while(chosen[pos]+1>=arg_correspondance[pos].size());
|
||||
chosen[pos]++;
|
||||
}
|
||||
}
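
Editor's note: the closing loop visits combinations of the per-position candidate lists by treating chosen as an odometer over arg_correspondance. For reference, a generic self-contained sketch of one way to enumerate every element of such a Cartesian product (illustrative only, not the datalog/spacer API; every candidate list is assumed non-empty):

#include <cstddef>
#include <iostream>
#include <vector>

// Call `action` once for every combination that picks one element from each candidate list.
template <typename T, typename F>
void for_each_combination(std::vector<std::vector<T>> const &candidates, F action) {
    std::vector<std::size_t> chosen(candidates.size(), 0);
    while (true) {
        std::vector<T> combo;
        for (std::size_t i = 0; i < chosen.size(); ++i)
            combo.push_back(candidates[i][chosen[i]]);
        action(combo);
        std::size_t pos = 0;                                    // advance the odometer
        while (pos < chosen.size() && chosen[pos] + 1 == candidates[pos].size())
            chosen[pos++] = 0;                                  // wrapped digits reset to zero
        if (pos == chosen.size()) return;                       // every digit wrapped: done
        ++chosen[pos];
    }
}

int main() {
    std::vector<std::vector<int>> cands = { {1, 2}, {10, 20} };
    for_each_combination(cands, [](std::vector<int> const &c) {
        std::cout << c[0] << " " << c[1] << "\n";               // prints the 4 combinations
    });
}
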
|
||||
}
|
123  src/muz/transforms/dl_mk_array_instantiation.h  Normal file
|
@ -0,0 +1,123 @@
|
|||
/*++
|
||||
|
||||
Module Name:
|
||||
|
||||
dl_mk_array_instantiation.h
|
||||
|
||||
Abstract:
|
||||
Transforms predicates so that array invariants can be discovered.
|
||||
|
||||
 Motivation : Given a predicate P(a), no quantifier-free solution can express that P(a) <=> forall i, a[i] = 0
|
||||
|
||||
Solution : Introduce a fresh variable i, and transform P(a) into P!inst(i, a).
|
||||
Now, (P!inst(i,a) := a[i] = 0) <=> P(a) := forall i, a[i] = 0.
|
||||
|
||||
Transformation on Horn rules:
|
||||
P(a, args) /\ phi(a, args, args') => P'(args') (for simplicity, assume there are no arrays in args').
|
||||
Is transformed into:
|
||||
(/\_r in read_indices(phi) P!inst(r, a, args)) /\ phi(a, args, args') => P'(args')
|
||||
|
||||
Limitations : This technique can only discover invariants on arrays that depend on one quantifier.
|
||||
Related work : Techniques relying on adding quantifiers and eliminating them. See dl_mk_quantifier_abstraction and dl_mk_quantifier_instantiation
|
||||
|
||||
Implementation:
|
||||
The implementation follows the solution suggested above, with more options. The addition of options implies that in the simple
|
||||
case described above, we in fact have P(a) transformed into P(i, a[i], a).
|
||||
|
||||
1) Dealing with multiple quantifiers -> The options fixedpoint.xform.instantiate_arrays.nb_quantifier gives the number of quantifiers per array.
|
||||
|
||||
 2) Enforcing the instantiation -> We suggest an option (enforce_instantiation) to enforce this abstraction. This transforms
|
||||
 P(a) into P(i, a[i]). This forces the solver to restrict the search space, at the cost of possibly imprecise results. This option
|
||||
corresponds to fixedpoint.xform.instantiate_arrays.enforce
|
||||
|
||||
3) Adding slices in the mix -> We wish to have the possibility to further restrict the search space: we want to smash cells, given a smashing rule.
|
||||
For example, in for loops j=0; j<n; j++, it might be relevant to restrict the search space and look for invariants that only depend on whether
|
||||
0<=i<j or j<=i, where i is the quantified variable.
|
||||
|
||||
Formally, a smashing rule is a function from the Index set (usually integer) to integers (the id set).
|
||||
GetId(i) should return the id of the set i belongs in.
|
||||
|
||||
In our example, we can give 0 as the id of the set {n, 0<=n<j} and 1 for the set {n, j<=n}, and -1 for the set {n, n<0}. We then have
|
||||
GetId(i) = ite(i<0, -1, ite(i<j, 0, 1))
|
||||
|
||||
Given that GetId function, P(a) /\ phi(a, ...) => P'(...) is transformed into
|
||||
(/\_r in read_indices(phi) P!inst(id_r, a[r], a) /\ GetId(r) = id_r) /\ phi(a, ...) => P'(...).
|
||||
Note : when no slicing is done, GetId(i) = i.
|
||||
This option corresponds to fixedpoint.xform.instantiate_arrays.slice_technique
|
||||
|
||||
Although we described GetId as returning integers, there is no reason to restrict the type of ids to integers. A more direct method,
|
||||
for the 0<=i<j or j<=i case could be :
|
||||
GetId(i) = (i<0, i<j)
|
||||
|
||||
GetId is even more powerful as we deal with the multiple quantifiers on multiple arrays.
|
||||
For example, we can use GetId to look for the same quantifiers in each array.
|
||||
Assume we have arrays a and b, instantiated with one quantifier each i and j.
|
||||
We can have GetId(i,j) = ite(i=j, (i, true), (fresh, false))
|
||||
|
||||
4) Reducing the set of r in read_indices(phi): in fact, we do not need to "instantiate" on all read indices of phi,
|
||||
we can restrict ourselves to those "linked" to a, through equalities and stores.
|
||||
|
||||
|
||||
Author:
|
||||
|
||||
Julien Braine
|
||||
|
||||
Revision History:
|
||||
|
||||
--*/
|
||||
|
||||
#ifndef DL_MK_ARRAY_INSTANTIATION_H_
|
||||
#define DL_MK_ARRAY_INSTANTIATION_H_
|
||||
|
||||
|
||||
#include "dl_rule_transformer.h"
|
||||
#include "../spacer/obj_equiv_class.h"
|
||||
|
||||
namespace datalog {
|
||||
|
||||
class context;
|
||||
class mk_array_instantiation : public rule_transformer::plugin {
|
||||
//Context objects
|
||||
ast_manager& m;
|
||||
context& m_ctx;
|
||||
array_util m_a;
|
||||
|
||||
//Rule set context
|
||||
const rule_set*src_set;
|
||||
rule_set*dst;
|
||||
rule_manager* src_manager;
|
||||
|
||||
//Rule context
|
||||
obj_map<expr, ptr_vector<expr> > selects;
|
||||
spacer::expr_equiv_class eq_classes;
|
||||
unsigned cnt;//Index for new variables
|
||||
obj_map<expr, var*> done_selects;
|
||||
expr_ref_vector ownership;
|
||||
|
||||
//Helper functions
|
||||
void instantiate_rule(const rule& r, rule_set & dest);//Instantiates the rule
|
||||
void retrieve_selects(expr* e);//Retrieves all selects (fills the selects and eq_classes members)
|
||||
expr_ref rewrite_select(expr*array, expr*select);//Rewrites select(a, args) to select(array, args)
|
||||
expr_ref_vector retrieve_all_selects(expr*array);//Retrieves all selects linked to a given array (using eq classes and selects)
|
||||
expr_ref_vector instantiate_pred(app*old_pred);//Returns all the instantiation of a given predicate
|
||||
expr_ref create_pred(app*old_pred, expr_ref_vector& new_args);//Creates a predicate
|
||||
expr_ref create_head(app* old_head);//Creates the new head
|
||||
var * mk_select_var(expr* select);
|
||||
|
||||
/*Given the old predicate, and the new arguments for the new predicate, returns the new setId arguments.
|
||||
By default getId(P(x, y, a, b), (x, y, a[i], a[j], a, b[k], b[l], b)) (nb_quantifier=2, enforce=false)
|
||||
returns (i,j,k,l)
|
||||
So that the final created predicate is P!inst(x, y, a[i], a[j], a, b[k], b[l], b, i, j, k, l)
|
||||
*/
|
||||
expr_ref_vector getId(app*old_pred, const expr_ref_vector& new_args);
|
||||
public:
|
||||
mk_array_instantiation(context & ctx, unsigned priority);
|
||||
rule_set * operator()(rule_set const & source);
|
||||
virtual ~mk_array_instantiation(){}
|
||||
};
|
||||
|
||||
|
||||
|
||||
};
|
||||
|
||||
#endif /* DL_MK_ARRAY_INSTANTIATION_H_ */
|
|
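The options referred to in the header comment (nb_quantifier, enforce, slice_technique) are fixedpoint parameters. As a rough usage sketch, assuming the standard Z3 C++ API (z3++.h) and assuming the parameter names match the fixedpoint.xform.instantiate_arrays.* spellings given above minus the fixedpoint. prefix (neither assumption is verified against this commit), enabling the transformation from client code could look like this:

    // Hypothetical sketch only: the option strings are assumptions taken from
    // the header comment above and may differ between Z3 versions.
    #include "z3++.h"

    int main() {
        z3::context c;
        z3::fixedpoint fp(c);

        z3::params p(c);
        p.set("xform.instantiate_arrays", true);              // turn the pass on
        p.set("xform.instantiate_arrays.nb_quantifier", 1u);  // one quantifier per array
        p.set("xform.instantiate_arrays.enforce", false);     // keep the original array argument
        fp.set(p);

        // ... declare relations, add Horn rules, then call fp.query(...) as usual ...
        return 0;
    }

The same options can normally also be set from SMT-LIB 2 scripts or the command line under whatever prefix the fixedpoint parameter module is registered with in the Z3 version at hand.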
@ -27,6 +27,7 @@ Revision History:

#include "muz/transforms/dl_mk_interp_tail_simplifier.h"
#include "ast/ast_util.h"

#include "fixedpoint_params.hpp"
namespace datalog {

    // -----------------------------------

@ -397,6 +398,8 @@ namespace datalog {

    }

    bool mk_interp_tail_simplifier::propagate_variable_equivalences(rule * r, rule_ref& res) {
        if (!m_context.get_params ().xform_tail_simplifier_pve ())
            return false;
        unsigned u_len = r->get_uninterpreted_tail_size();
        unsigned len = r->get_tail_size();
        if (u_len == len) {

@ -25,6 +25,7 @@ Revision History:

#include "ast/rewriter/rewriter_def.h"
#include "muz/transforms/dl_mk_subsumption_checker.h"

#include "fixedpoint_params.hpp"
namespace datalog {


@ -328,6 +329,8 @@ namespace datalog {


    rule_set * mk_subsumption_checker::operator()(rule_set const & source) {
        // TODO mc
        if (!m_context.get_params ().xform_subsumption_checker())
            return 0;

        m_have_new_total_rule = false;
        collect_ground_unconditional_rule_heads(source);

@ -33,7 +33,9 @@ Revision History:

#include "muz/transforms/dl_mk_quantifier_instantiation.h"
#include "muz/transforms/dl_mk_subsumption_checker.h"
#include "muz/transforms/dl_mk_scale.h"
#include"fixedpoint_params.hpp"
#include "muz/transforms/dl_mk_array_eq_rewrite.h"
#include "muz/transforms/dl_mk_array_instantiation.h"
#include "fixedpoint_params.hpp"

namespace datalog {

@ -46,22 +48,32 @@ namespace datalog {

        transf.register_plugin(alloc(datalog::mk_coi_filter, ctx));
        transf.register_plugin(alloc(datalog::mk_interp_tail_simplifier, ctx));

        if (ctx.get_params().xform_instantiate_arrays()) {
            transf.register_plugin(alloc(datalog::mk_array_instantiation, ctx, 34999));
        }
        if(ctx.get_params().xform_transform_arrays())
            transf.register_plugin(alloc(datalog::mk_array_eq_rewrite, ctx, 34998));
        if (ctx.get_params().xform_quantify_arrays()) {
            transf.register_plugin(alloc(datalog::mk_quantifier_abstraction, ctx, 38000));
        }
        transf.register_plugin(alloc(datalog::mk_quantifier_instantiation, ctx, 37000));

        if (ctx.get_params().datalog_subsumption()) {
            transf.register_plugin(alloc(datalog::mk_subsumption_checker, ctx, 35005));
        }
        transf.register_plugin(alloc(datalog::mk_rule_inliner, ctx, 35000));
        transf.register_plugin(alloc(datalog::mk_coi_filter, ctx, 34990));
        transf.register_plugin(alloc(datalog::mk_interp_tail_simplifier, ctx, 34980));

        //and another round of inlining
        if (ctx.get_params().datalog_subsumption()) {
            transf.register_plugin(alloc(datalog::mk_subsumption_checker, ctx, 34975));
        }
        transf.register_plugin(alloc(datalog::mk_rule_inliner, ctx, 34970));
        transf.register_plugin(alloc(datalog::mk_coi_filter, ctx, 34960));
        transf.register_plugin(alloc(datalog::mk_interp_tail_simplifier, ctx, 34950));

        if (ctx.get_params().datalog_subsumption()) {
            transf.register_plugin(alloc(datalog::mk_subsumption_checker, ctx, 34940));
            transf.register_plugin(alloc(datalog::mk_rule_inliner, ctx, 34930));
            transf.register_plugin(alloc(datalog::mk_subsumption_checker, ctx, 34920));

@ -69,12 +81,16 @@ namespace datalog {

            transf.register_plugin(alloc(datalog::mk_subsumption_checker, ctx, 34900));
            transf.register_plugin(alloc(datalog::mk_rule_inliner, ctx, 34890));
            transf.register_plugin(alloc(datalog::mk_subsumption_checker, ctx, 34880));
        }
        else {
            transf.register_plugin(alloc(datalog::mk_rule_inliner, ctx, 34930));
        }

        transf.register_plugin(alloc(datalog::mk_bit_blast, ctx, 35000));
        transf.register_plugin(alloc(datalog::mk_karr_invariants, ctx, 36010));
        transf.register_plugin(alloc(datalog::mk_scale, ctx, 36030));
        if (!ctx.get_params().xform_quantify_arrays()) {
            transf.register_plugin(alloc(datalog::mk_array_blast, ctx, 36000));
            transf.register_plugin(alloc(datalog::mk_array_blast, ctx, 35999));
        }
        if (ctx.get_params().xform_magic()) {
            transf.register_plugin(alloc(datalog::mk_magic_symbolic, ctx, 36020));

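The hunks above wire the new passes into the default transformation pipeline by registering plugins with numeric priorities; judging from the diff, the later rounds of inlining and subsumption checking are given smaller numbers. A new pass would follow the same shape as the mk_array_instantiation header shown earlier in this commit. The declaration below is purely illustrative and not part of the commit: mk_my_rewrite is an invented name, the include path copies the relative style of the new header, and the registration line mirrors the alloc(...) calls in the hunk above.

    // Hypothetical skeleton of an additional rule transformation pass,
    // patterned after mk_array_instantiation above.
    #include "dl_rule_transformer.h"

    namespace datalog {

        class mk_my_rewrite : public rule_transformer::plugin {
            context& m_ctx;
        public:
            mk_my_rewrite(context& ctx, unsigned priority);
            // Returns the transformed rule set, or 0 when nothing changes.
            rule_set * operator()(rule_set const & source);
            virtual ~mk_my_rewrite() {}
        };
    }

    // Registration next to the existing passes, choosing a priority that
    // slots the pass where it should run relative to its neighbours:
    //     transf.register_plugin(alloc(datalog::mk_my_rewrite, ctx, 34997));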
@ -49,6 +49,9 @@ void smt_params::updt_local_params(params_ref const & _p) {

    else if (_p.get_bool("arith.least_error_pivot", false))
        m_arith_pivot_strategy = ARITH_PIVOT_LEAST_ERROR;
    theory_array_params::updt_params(_p);
    m_dump_benchmarks = false;
    m_dump_min_time = 0.5;
    m_dump_recheck = false;
}

void smt_params::updt_params(params_ref const & p) {

@ -217,6 +217,15 @@ struct smt_params : public preprocessor_params,

    bool    m_dump_goal_as_smt;
    bool    m_auto_config;

    // -----------------------------------
    //
    // Spacer hacking
    //
    // -----------------------------------
    bool    m_dump_benchmarks;
    double  m_dump_min_time;
    bool    m_dump_recheck;

    // -----------------------------------
    //
    // Solver selection

@ -3548,7 +3548,7 @@ namespace smt {

            return false;
        }
        if (cmr == quantifier_manager::UNKNOWN) {
            IF_VERBOSE(1, verbose_stream() << "(smt.giveup quantifiers)\n";);
            IF_VERBOSE(2, verbose_stream() << "(smt.giveup quantifiers)\n";);
            // giving up
            m_last_search_failure = QUANTIFIERS;
            status = l_undef;

@ -3558,7 +3558,7 @@ namespace smt {

            inc_limits();
            if (status == l_true || !m_fparams.m_restart_adaptive || m_agility < m_fparams.m_restart_agility_threshold) {
                SASSERT(!inconsistent());
                IF_VERBOSE(1, verbose_stream() << "(smt.restarting :propagations " << m_stats.m_num_propagations
                IF_VERBOSE(2, verbose_stream() << "(smt.restarting :propagations " << m_stats.m_num_propagations
                    << " :decisions " << m_stats.m_num_decisions
                    << " :conflicts " << m_stats.m_num_conflicts << " :restart " << m_restart_threshold;
                if (m_fparams.m_restart_strategy == RS_IN_OUT_GEOMETRIC) {
