Mirror of https://github.com/Z3Prover/z3 (synced 2025-04-07 09:55:19 +00:00)

Commit d1d038da35: Merge branch 'unstable' of https://git01.codeplex.com/z3 into fpa-api

Changed paths:
  scripts
  src/api
  src/ast
  src/cmd_context
  src/duality: duality.h, duality_profiling.cpp, duality_rpfp.cpp, duality_solver.cpp, duality_wrapper.cpp, duality_wrapper.h
  src/interp: iz3base.cpp, iz3base.h, iz3checker.cpp, iz3foci.cpp, iz3hash.h, iz3interp.cpp, iz3interp.h, iz3mgr.cpp, iz3mgr.h, iz3pp.cpp, iz3profiling.cpp, iz3proof.cpp, iz3proof_itp.cpp, iz3proof_itp.h, iz3scopes.cpp, iz3translate.cpp, iz3translate_direct.cpp
  src/muz
  src/smt: params, smt_conflict_resolution.cpp, smt_model_checker.cpp, smt_quantifier.cpp, smt_quantifier.h, theory_arith_aux.h
  src/tactic: arith, fpa, portfolio
  src/util
@@ -55,8 +55,8 @@ def init_project_def():
add_lib('fpa', ['core_tactics', 'bv_tactics', 'sat_tactic'], 'tactic/fpa')
add_lib('smt_tactic', ['smt'], 'smt/tactic')
add_lib('sls_tactic', ['tactic', 'normal_forms', 'core_tactics', 'bv_tactics'], 'tactic/sls')
add_lib('duality', ['smt', 'interp'])
add_lib('qe', ['smt','sat'], 'qe')
add_lib('duality', ['smt', 'interp', 'qe'])
add_lib('muz', ['smt', 'sat', 'smt2parser', 'aig_tactic', 'qe'], 'muz/base')
add_lib('transforms', ['muz', 'hilbert'], 'muz/transforms')
add_lib('rel', ['muz', 'transforms'], 'muz/rel')
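The add_lib calls above register each library together with its dependency list; the build scripts later use these declarations to emit build rules in dependency order. As a rough, illustrative sketch of that ordering idea only (the helper below is hypothetical and not part of mk_project.py, and the dependency lists are abbreviated):

# Hypothetical sketch: order libraries so every dependency precedes its dependents.
deps = {
    'smt': [], 'interp': ['smt'], 'sat': [], 'qe': ['smt', 'sat'],
    'duality': ['smt', 'interp', 'qe'],
}

def build_order(deps):
    order, seen = [], set()
    def visit(lib):
        if lib in seen:
            return
        seen.add(lib)
        for d in deps[lib]:      # visit dependencies first
            visit(d)
        order.append(lib)
    for lib in deps:
        visit(lib)
    return order

print(build_order(deps))  # ['smt', 'interp', 'sat', 'qe', 'duality']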
@@ -638,7 +638,13 @@ def is_compiler(given, expected):
def is_CXX_gpp():
return is_compiler(CXX, 'g++')

def is_clang_in_gpp_form(cc):
version_string = subprocess.check_output([cc, '--version'])
return str(version_string).find('clang') != -1

def is_CXX_clangpp():
if is_compiler(CXX, 'g++'):
return is_clang_in_gpp_form(CXX)
return is_compiler(CXX, 'clang++')

def get_cpp_files(path):

@@ -1192,9 +1198,9 @@ class JavaDLLComponent(Component):
deps += '%s ' % os.path.join(self.to_src_dir, 'enumerations', jfile)
out.write(deps)
out.write('\n')
if IS_WINDOWS:
JAVAC = '"%s"' % JAVAC
JAR = '"%s"' % JAR
#if IS_WINDOWS:
JAVAC = '"%s"' % JAVAC
JAR = '"%s"' % JAR
t = ('\t%s %s.java -d %s\n' % (JAVAC, os.path.join(self.to_src_dir, 'enumerations', '*'), os.path.join('api', 'java', 'classes')))
out.write(t)
t = ('\t%s -cp %s %s.java -d %s\n' % (JAVAC,

@@ -1431,7 +1437,7 @@ def mk_config():
'SO_EXT=.dll\n'
'SLINK=cl\n'
'SLINK_OUT_FLAG=/Fe\n'
'OS_DEFINES=/D _WINDOWS\n')
extra_opt = ''
if GIT_HASH:
extra_opt = '%s /D Z3GITHASH=%s' % (extra_opt, GIT_HASH)

@@ -1479,7 +1485,7 @@ def mk_config():
print('Java Compiler: %s' % JAVAC)
else:
global CXX, CC, GMP, FOCI2, CPPFLAGS, CXXFLAGS, LDFLAGS, EXAMP_DEBUG_FLAG
OS_DEFINES = ""
ARITH = "internal"
check_ar()
CXX = find_cxx_compiler()

@@ -1502,7 +1508,7 @@ def mk_config():
SLIBEXTRAFLAGS = '%s %s' % (SLIBEXTRAFLAGS,FOCI2LIB)
CPPFLAGS = '%s -D_FOCI2' % CPPFLAGS
else:
print "FAILED\n"
print("FAILED\n")
FOCI2 = False
if GIT_HASH:
CPPFLAGS = '%s -DZ3GITHASH=%s' % (CPPFLAGS, GIT_HASH)

@@ -1530,21 +1536,21 @@ def mk_config():
SLIBFLAGS = '-dynamiclib'
elif sysname == 'Linux':
CXXFLAGS = '%s -fno-strict-aliasing -D_LINUX_' % CXXFLAGS
OS_DEFINES = '-D_LINUX'
SO_EXT = '.so'
LDFLAGS = '%s -lrt' % LDFLAGS
SLIBFLAGS = '-shared'
SLIBEXTRAFLAGS = '%s -lrt' % SLIBEXTRAFLAGS
elif sysname == 'FreeBSD':
CXXFLAGS = '%s -fno-strict-aliasing -D_FREEBSD_' % CXXFLAGS
OS_DEFINES = '-D_FREEBSD_'
SO_EXT = '.so'
LDFLAGS = '%s -lrt' % LDFLAGS
SLIBFLAGS = '-shared'
SLIBEXTRAFLAGS = '%s -lrt' % SLIBEXTRAFLAGS
elif sysname[:6] == 'CYGWIN':
CXXFLAGS = '%s -D_CYGWIN -fno-strict-aliasing' % CXXFLAGS
OS_DEFINES = '-D_CYGWIN'
SO_EXT = '.dll'
SLIBFLAGS = '-shared'
else:

@@ -1580,7 +1586,7 @@ def mk_config():
config.write('SLINK_FLAGS=%s\n' % SLIBFLAGS)
config.write('SLINK_EXTRA_FLAGS=%s\n' % SLIBEXTRAFLAGS)
config.write('SLINK_OUT_FLAG=-o \n')
config.write('OS_DEFINES=%s\n' % OS_DEFINES)
if is_verbose():
print('Host platform: %s' % sysname)
print('C++ Compiler: %s' % CXX)
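mk_config chooses shared-library settings by branching on the host system name, as the Linux/FreeBSD/CYGWIN branches above show. A condensed, illustrative sketch of that dispatch only (the real function sets many more variables and covers more platforms):

import platform

def shared_lib_flags(sysname=platform.system()):
    """Map a host OS name to (OS define, shared-object extension, shared-link flag)."""
    if sysname == 'Linux':
        return ('-D_LINUX', '.so', '-shared')
    if sysname == 'FreeBSD':
        return ('-D_FREEBSD_', '.so', '-shared')
    if sysname.startswith('CYGWIN'):
        return ('-D_CYGWIN', '.dll', '-shared')
    raise NotImplementedError(sysname)  # Darwin etc. handled separately ('-dynamiclib')

print(shared_lib_flags('Linux'))  # ('-D_LINUX', '.so', '-shared')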
@@ -523,7 +523,7 @@ def mk_java():
java_native.write(' public static class LongPtr { public long value; }\n')
java_native.write(' public static class StringPtr { public String value; }\n')
java_native.write(' public static native void setInternalErrorHandler(long ctx);\n\n')
if IS_WINDOWS:
if IS_WINDOWS or os.uname()[0]=="CYGWIN":
java_native.write(' static { System.loadLibrary("%s"); }\n' % get_component('java').dll_name)
else:
java_native.write(' static { System.loadLibrary("%s"); }\n' % get_component('java').dll_name[3:]) # We need 3: to extract the prefix 'lib' from the dll_name

@@ -588,6 +588,9 @@ def mk_java():
java_wrapper = open(java_wrapperf, 'w')
pkg_str = get_component('java').package_name.replace('.', '_')
java_wrapper.write('// Automatically generated file\n')
java_wrapper.write('#ifdef _CYGWIN\n')
java_wrapper.write('typedef long long __int64;\n')
java_wrapper.write('#endif\n')
java_wrapper.write('#include<jni.h>\n')
java_wrapper.write('#include<stdlib.h>\n')
java_wrapper.write('#include"z3.h"\n')
@@ -17,6 +17,7 @@ Revision History:
--*/
#include<iostream>
#include<sstream>
#include<vector>
#include"z3.h"
#include"api_log_macros.h"
#include"api_context.h"

@@ -42,6 +43,20 @@ Revision History:
using namespace stl_ext;
#endif

#ifndef WIN32
// WARNING: don't make a hash_map with this if the range type
// has a destructor: you'll get an address dependency!!!
namespace stl_ext {
template <>
class hash<Z3_ast> {
public:
size_t operator()(const Z3_ast p) const {
return (size_t) p;
}
};
}
#endif

typedef interpolation_options_struct *Z3_interpolation_options;

extern "C" {

@@ -305,8 +320,8 @@ static void get_file_params(const char *filename, hash_map<std::string,std::stri
tokenize(first_line.substr(2,first_line.size()-2),tokens);
for(unsigned i = 0; i < tokens.size(); i++){
std::string &tok = tokens[i];
int eqpos = tok.find('=');
if(eqpos >= 0 && eqpos < (int)tok.size()){
size_t eqpos = tok.find('=');
if(eqpos >= 0 && eqpos < tok.size()){
std::string left = tok.substr(0,eqpos);
std::string right = tok.substr(eqpos+1,tok.size()-eqpos-1);
params[left] = right;
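get_file_params tokenizes the first line of the problem file and records K=V pairs; the fix above turns eqpos into a size_t so that the bound check against tok.size() correctly rejects std::string::npos when a token has no '='. A rough Python analogue of the parsing, for illustration only (not the C API):

def get_file_params(first_line):
    """Collect K=V pairs from a header line; tokens without '=' are ignored."""
    params = {}
    for tok in first_line.split():
        eqpos = tok.find('=')          # -1 plays the role of std::string::npos here
        if 0 <= eqpos < len(tok):
            params[tok[:eqpos]] = tok[eqpos + 1:]
    return params

print(get_file_params("THEORY=2 other"))  # {'THEORY': '2'}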
@@ -363,8 +378,8 @@ extern "C" {
#else

static Z3_ast and_vec(Z3_context ctx,std::vector<Z3_ast> &c){
return (c.size() > 1) ? Z3_mk_and(ctx,c.size(),&c[0]) : c[0];
static Z3_ast and_vec(Z3_context ctx,svector<Z3_ast> &c){
return (c.size() > 1) ? Z3_mk_and(ctx,c.size(),&c[0]) : c[0];
}

static Z3_ast parents_vector_to_tree(Z3_context ctx, int num, Z3_ast *cnsts, int *parents){

@@ -381,15 +396,15 @@ extern "C" {
}
}
else {
std::vector<std::vector<Z3_ast> > chs(num);
std::vector<svector<Z3_ast> > chs(num);
for(int i = 0; i < num-1; i++){
std::vector<Z3_ast> &c = chs[i];
svector<Z3_ast> &c = chs[i];
c.push_back(cnsts[i]);
Z3_ast foo = Z3_mk_interp(ctx,and_vec(ctx,c));
chs[parents[i]].push_back(foo);
}
{
std::vector<Z3_ast> &c = chs[num-1];
svector<Z3_ast> &c = chs[num-1];
c.push_back(cnsts[num-1]);
res = and_vec(ctx,c);
}

@@ -454,7 +469,7 @@ extern "C" {
static std::string read_msg;
static std::vector<Z3_ast> read_theory;

static bool iZ3_parse(Z3_context ctx, const char *filename, const char **error, std::vector<Z3_ast> &assertions){
static bool iZ3_parse(Z3_context ctx, const char *filename, const char **error, svector<Z3_ast> &assertions){
read_error.clear();
try {
std::string foo(filename);

@@ -496,26 +511,26 @@ extern "C" {

hash_map<std::string,std::string> file_params;
get_file_params(filename,file_params);

int num_theory = 0;
unsigned num_theory = 0;
if(file_params.find("THEORY") != file_params.end())
num_theory = atoi(file_params["THEORY"].c_str());

std::vector<Z3_ast> assertions;
svector<Z3_ast> assertions;
if(!iZ3_parse(ctx,filename,error,assertions))
return false;

if(num_theory > (int)assertions.size())
num_theory = assertions.size();
int num = assertions.size() - num_theory;
if(num_theory > assertions.size())
num_theory = assertions.size();
unsigned num = assertions.size() - num_theory;

read_cnsts.resize(num);
read_parents.resize(num);
read_theory.resize(num_theory);

for(int j = 0; j < num_theory; j++)
for(unsigned j = 0; j < num_theory; j++)
read_theory[j] = assertions[j];
for(int j = 0; j < num; j++)
for(unsigned j = 0; j < num; j++)
read_cnsts[j] = assertions[j+num_theory];

if(ret_num_theory)

@@ -529,12 +544,12 @@ extern "C" {
return true;
}

for(int j = 0; j < num; j++)
for(unsigned j = 0; j < num; j++)
read_parents[j] = SHRT_MAX;

hash_map<Z3_ast,int> pred_map;

for(int j = 0; j < num; j++){
for(unsigned j = 0; j < num; j++){
Z3_ast lhs = 0, rhs = read_cnsts[j];

if(Z3_get_decl_kind(ctx,Z3_get_app_decl(ctx,Z3_to_app(ctx,rhs))) == Z3_OP_IMPLIES){

@@ -588,7 +603,7 @@ extern "C" {
}
}

for(int j = 0; j < num-1; j++)
for(unsigned j = 0; j < num-1; j++)
if(read_parents[j] == SHRT_MIN){
read_error << "formula " << j+1 << ": unreferenced";
goto fail;
@@ -35,7 +35,7 @@ public class BitVecNum extends BitVecExpr
{
Native.LongPtr res = new Native.LongPtr();
if (Native.getNumeralInt64(getContext().nCtx(), getNativeObject(), res) ^ true)
throw new Z3Exception("Numeral is not an int64");
throw new Z3Exception("Numeral is not a long");
return res.value;
}
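The change above only rewords the exception text: getLong fails when the bit-vector numeral does not fit in a signed 64-bit Java long. A tiny sketch of the underlying range check (assuming the usual two's-complement 64-bit range):

def fits_java_long(value):
    """True iff value fits in a signed 64-bit (two's-complement) long."""
    return -(2 ** 63) <= value <= 2 ** 63 - 1

print(fits_java_long(2 ** 63 - 1))  # True
print(fits_java_long(2 ** 63))      # False: the API throws "Numeral is not a long"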
@@ -586,7 +586,7 @@ class FuncDeclRef(AstRef):
return Z3_func_decl_to_ast(self.ctx_ref(), self.ast)

def as_func_decl(self):
return self.ast

def name(self):
"""Return the name of the function declaration `self`.
@@ -1850,6 +1850,7 @@ func_decl * ast_manager::mk_func_decl(symbol const & name, unsigned arity, sort

void ast_manager::check_sort(func_decl const * decl, unsigned num_args, expr * const * args) const {
ast_manager& m = const_cast<ast_manager&>(*this);

if (decl->is_associative()) {
sort * expected = decl->get_domain(0);
for (unsigned i = 0; i < num_args; i++) {

@@ -1894,7 +1895,18 @@ void ast_manager::check_sorts_core(ast const * n) const {
if (n->get_kind() != AST_APP)
return; // nothing else to check...
app const * a = to_app(n);
check_sort(a->get_decl(), a->get_num_args(), a->get_args());
func_decl* d = a->get_decl();
check_sort(d, a->get_num_args(), a->get_args());
if (a->get_num_args() == 2 &&
!d->is_flat_associative() &&
d->is_right_associative()) {
check_sorts_core(a->get_arg(1));
}
if (a->get_num_args() == 2 &&
!d->is_flat_associative() &&
d->is_left_associative()) {
check_sorts_core(a->get_arg(0));
}
}

bool ast_manager::check_sorts(ast const * n) const {

@@ -3161,3 +3173,6 @@ void prexpr(expr_ref &e){
std::cout << mk_pp(e.get(), e.get_manager()) << std::endl;
}

void ast_manager::show_id_gen(){
std::cout << "id_gen: " << m_expr_id_gen.show_hash() << " " << m_decl_id_gen.show_hash() << "\n";
}

@@ -1418,6 +1418,8 @@ protected:
public:
typedef expr_dependency_array_manager::ref expr_dependency_array;

void show_id_gen();

protected:
small_object_allocator m_alloc;
family_manager m_family_manager;
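The new check_sorts_core code recurses into one argument of a binary, non-flat-associative application, since a chain such as a + b + c may be nested as binary applications and only the outermost application would be sort-checked otherwise. A small sketch of that traversal over a toy term representation (the names and encoding are illustrative, not the ast_manager API):

# Toy term encoding: ('app', op, [args]) or ('leaf', name).
def check_sorts(term, check_app):
    if term[0] != 'app':
        return
    _, op, args = term
    check_app(op, args)                            # check the outermost application
    if len(args) == 2 and op.endswith('_rassoc'):
        check_sorts(args[1], check_app)            # descend into the nested right side
    if len(args) == 2 and op.endswith('_lassoc'):
        check_sorts(args[0], check_app)            # descend into the nested left side

t = ('app', 'add_lassoc',
     [('app', 'add_lassoc', [('leaf', 'a'), ('leaf', 'b')]), ('leaf', 'c')])
check_sorts(t, lambda op, args: print(op, len(args), 'args'))
# Visits both nested additions instead of only the outermost one.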
@@ -213,6 +213,9 @@ func_decl * float_decl_plugin::mk_float_const_decl(decl_kind k, unsigned num_par
if (num_parameters == 1 && parameters[0].is_ast() && is_sort(parameters[0].get_ast()) && is_float_sort(to_sort(parameters[0].get_ast()))) {
s = to_sort(parameters[0].get_ast());
}
else if (num_parameters == 2 && parameters[0].is_int() && parameters[1].is_int()) {
s = mk_float_sort(parameters[0].get_int(), parameters[1].get_int());
}
else if (range != 0 && is_float_sort(range)) {
s = range;
}

@@ -376,7 +379,19 @@ func_decl * float_decl_plugin::mk_to_float(decl_kind k, unsigned num_parameters,
sort * fp = mk_float_sort(domain[2]->get_parameter(0).get_int(), domain[1]->get_parameter(0).get_int()+1);
symbol name("asFloat");
return m_manager->mk_func_decl(name, arity, domain, fp, func_decl_info(m_family_id, k, num_parameters, parameters));
}
}
else if (m_bv_plugin && arity == 1 && is_sort_of(domain[0], m_bv_fid, BV_SORT)) {
if (num_parameters != 2)
m_manager->raise_exception("invalid number of parameters to to_fp");
if (!parameters[0].is_int() || !parameters[1].is_int())
m_manager->raise_exception("invalid parameter type to to_fp");
int ebits = parameters[0].get_int();
int sbits = parameters[1].get_int();

sort * fp = mk_float_sort(ebits, sbits);
symbol name("asFloat");
return m_manager->mk_func_decl(name, arity, domain, fp, func_decl_info(m_family_id, k, num_parameters, parameters));
}
else {
// .. Otherwise we only know how to convert rationals/reals.
if (!(num_parameters == 2 && parameters[0].is_int() && parameters[1].is_int()))

@@ -412,6 +427,53 @@ func_decl * float_decl_plugin::mk_to_ieee_bv(decl_kind k, unsigned num_parameter
return m_manager->mk_func_decl(name, 1, domain, bv_srt, func_decl_info(m_family_id, k, num_parameters, parameters));
}

func_decl * float_decl_plugin::mk_from3bv(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range) {
if (!m_bv_plugin)
m_manager->raise_exception("fp unsupported; use a logic with BV support");
if (arity != 3)
m_manager->raise_exception("invalid number of arguments to fp");
if (!is_sort_of(domain[0], m_bv_fid, BV_SORT) ||
!is_sort_of(domain[1], m_bv_fid, BV_SORT) ||
!is_sort_of(domain[2], m_bv_fid, BV_SORT))
m_manager->raise_exception("sort mismtach");

sort * fp = mk_float_sort(domain[1]->get_parameter(0).get_int(), domain[2]->get_parameter(0).get_int() + 1);
symbol name("fp");
return m_manager->mk_func_decl(name, arity, domain, fp, func_decl_info(m_family_id, k));
}

func_decl * float_decl_plugin::mk_to_fp_unsigned(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range) {
if (!m_bv_plugin)
m_manager->raise_exception("to_fp_unsigned unsupported; use a logic with BV support");
if (arity != 2)
m_manager->raise_exception("invalid number of arguments to to_fp_unsigned");
if (is_rm_sort(domain[0]))
m_manager->raise_exception("sort mismtach");
if (!is_sort_of(domain[1], m_bv_fid, BV_SORT))
m_manager->raise_exception("sort mismtach");

sort * fp = mk_float_sort(parameters[0].get_int(), parameters[1].get_int());
symbol name("to_fp_unsigned");
return m_manager->mk_func_decl(name, arity, domain, fp, func_decl_info(m_family_id, k));
}

func_decl * float_decl_plugin::mk_to_ubv(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range) {
NOT_IMPLEMENTED_YET();
}

func_decl * float_decl_plugin::mk_to_sbv(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range) {
NOT_IMPLEMENTED_YET();
}

func_decl * float_decl_plugin::mk_to_real(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range) {
NOT_IMPLEMENTED_YET();
}

func_decl * float_decl_plugin::mk_func_decl(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range) {
switch (k) {

@@ -465,6 +527,16 @@ func_decl * float_decl_plugin::mk_func_decl(decl_kind k, unsigned num_parameters
return mk_fused_ma(k, num_parameters, parameters, arity, domain, range);
case OP_TO_IEEE_BV:
return mk_to_ieee_bv(k, num_parameters, parameters, arity, domain, range);
case OP_FLOAT_FP:
return mk_from3bv(k, num_parameters, parameters, arity, domain, range);
case OP_FLOAT_TO_FP_UNSIGNED:
return mk_to_fp_unsigned(k, num_parameters, parameters, arity, domain, range);
case OP_FLOAT_TO_UBV:
return mk_to_ubv(k, num_parameters, parameters, arity, domain, range);
case OP_FLOAT_TO_SBV:
return mk_to_sbv(k, num_parameters, parameters, arity, domain, range);
case OP_FLOAT_TO_REAL:
return mk_to_real(k, num_parameters, parameters, arity, domain, range);
default:
m_manager->raise_exception("unsupported floating point operator");
return 0;

@@ -517,8 +589,9 @@ void float_decl_plugin::get_op_names(svector<builtin_name> & op_names, symbol co
if (m_bv_plugin)
op_names.push_back(builtin_name("asIEEEBV", OP_TO_IEEE_BV));

// We also support draft version 3
op_names.push_back(builtin_name("fp", OP_TO_FLOAT));
// These are the operators from the final draft of the SMT FloatingPoints standard
op_names.push_back(builtin_name("+oo", OP_FLOAT_PLUS_INF));
op_names.push_back(builtin_name("-oo", OP_FLOAT_MINUS_INF));

op_names.push_back(builtin_name("RNE", OP_RM_NEAREST_TIES_TO_EVEN));
op_names.push_back(builtin_name("RNA", OP_RM_NEAREST_TIES_TO_AWAY));

@@ -547,23 +620,24 @@ void float_decl_plugin::get_op_names(svector<builtin_name> & op_names, symbol co
op_names.push_back(builtin_name("fp.isNaN", OP_FLOAT_IS_NAN));
op_names.push_back(builtin_name("fp.min", OP_FLOAT_MIN));
op_names.push_back(builtin_name("fp.max", OP_FLOAT_MAX));
op_names.push_back(builtin_name("fp.convert", OP_TO_FLOAT));
op_names.push_back(builtin_name("to_fp", OP_TO_FLOAT));

if (m_bv_plugin) {
// op_names.push_back(builtin_name("fp.fromBv", OP_TO_FLOAT));
// op_names.push_back(builtin_name("fp.fromUBv", OP_TO_FLOAT));
// op_names.push_back(builtin_name("fp.fromSBv", OP_TO_FLOAT));
// op_names.push_back(builtin_name("fp.toUBv", OP_TO_IEEE_BV));
// op_names.push_back(builtin_name("fp.toSBv", OP_TO_IEEE_BV));
op_names.push_back(builtin_name("fp", OP_FLOAT_FP));
op_names.push_back(builtin_name("to_fp_unsigned", OP_FLOAT_TO_FP_UNSIGNED));
op_names.push_back(builtin_name("fp.to_ubv", OP_FLOAT_TO_UBV));
op_names.push_back(builtin_name("fp.to_sbv", OP_FLOAT_TO_SBV));
}

op_names.push_back(builtin_name("fp.fromReal", OP_TO_FLOAT));

// op_names.push_back(builtin_name("fp.toReal", ?));
}

void float_decl_plugin::get_sort_names(svector<builtin_name> & sort_names, symbol const & logic) {
sort_names.push_back(builtin_name("FP", FLOAT_SORT));
sort_names.push_back(builtin_name("RoundingMode", ROUNDING_MODE_SORT));

// In the SMT FPA final draft, FP is called FloatingPoint
sort_names.push_back(builtin_name("FloatingPoint", FLOAT_SORT));
}

expr * float_decl_plugin::get_some_value(sort * s) {
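The operator and sort tables above add the names from the final draft of the SMT FloatingPoint theory (fp, to_fp, to_fp_unsigned, fp.to_ubv, fp.to_sbv, the FloatingPoint sort, RNE/RNA rounding modes) alongside the older draft-3 names. A small illustrative SMT-LIB script using those names, held in a Python string for convenience; the surface syntax follows the draft standard and is an assumption, not taken verbatim from this diff:

# Illustrative SMT-LIB 2 input using the final-draft FloatingPoint names added above.
smt2 = """
(declare-const x (_ FloatingPoint 8 24))
(assert (= x (fp #b0 #b01111111 #b10000000000000000000000)))  ; literal from 3 bit-vectors (= 1.5)
(assert (= x ((_ to_fp 8 24) RNE 1.5)))                       ; conversion from a real with RNE rounding
(check-sat)
"""
print(smt2)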
@@ -72,6 +72,12 @@ enum float_op_kind {

OP_TO_FLOAT,
OP_TO_IEEE_BV,

OP_FLOAT_FP,
OP_FLOAT_TO_FP_UNSIGNED,
OP_FLOAT_TO_UBV,
OP_FLOAT_TO_SBV,
OP_FLOAT_TO_REAL,

LAST_FLOAT_OP
};

@@ -125,7 +131,17 @@ class float_decl_plugin : public decl_plugin {
unsigned arity, sort * const * domain, sort * range);
func_decl * mk_to_ieee_bv(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range);

func_decl * mk_from3bv(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range);
func_decl * mk_to_fp_unsigned(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range);
func_decl * mk_to_ubv(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range);
func_decl * mk_to_sbv(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range);
func_decl * mk_to_real(decl_kind k, unsigned num_parameters, parameter const * parameters,
unsigned arity, sort * const * domain, sort * range);

virtual void set_manager(ast_manager * m, family_id id);
unsigned mk_id(mpf const & v);
void recycled_id(unsigned id);
@@ -479,7 +479,7 @@ bool proof_checker::check1_basic(proof* p, expr_ref_vector& side_conditions) {
// otherwise t2 is also a quantifier.
return true;
}
UNREACHABLE();
IF_VERBOSE(0, verbose_stream() << "does not match last rule: " << mk_pp(p, m) << "\n";);
return false;
}
case PR_DER: {

@@ -488,13 +488,12 @@ bool proof_checker::check1_basic(proof* p, expr_ref_vector& side_conditions) {
match_fact(p, fact) &&
match_iff(fact.get(), t1, t2) &&
match_quantifier(t1, is_forall, decls1, body1) &&
is_forall &&
match_or(body1.get(), terms1)) {
is_forall) {
// TBD: check that terms are set of equalities.
// t2 is an instance of a predicate in terms1
return true;
}
UNREACHABLE();
}
IF_VERBOSE(0, verbose_stream() << "does not match last rule: " << mk_pp(p, m) << "\n";);
return false;
}
case PR_HYPOTHESIS: {

@@ -832,7 +831,7 @@ bool proof_checker::check1_basic(proof* p, expr_ref_vector& side_conditions) {
}
else {
IF_VERBOSE(0, verbose_stream() << "Could not establish complementarity for:\n" <<
mk_pp(lit1, m) << "\n" << mk_pp(lit2, m) << "\n";);
mk_pp(lit1, m) << "\n" << mk_pp(lit2, m) << "\n" << mk_pp(p, m) << "\n";);
}
fmls[i] = premise1;
}
@@ -64,6 +64,11 @@ br_status float_rewriter::mk_app_core(func_decl * f, unsigned num_args, expr * c
case OP_FLOAT_IS_SUBNORMAL: SASSERT(num_args == 1); st = mk_is_subnormal(args[0], result); break;
case OP_FLOAT_IS_SIGN_MINUS: SASSERT(num_args == 1); st = mk_is_sign_minus(args[0], result); break;
case OP_TO_IEEE_BV: SASSERT(num_args == 1); st = mk_to_ieee_bv(args[0], result); break;
case OP_FLOAT_FP: SASSERT(num_args == 3); st = mk_fp(args[0], args[1], args[2], result); break;
case OP_FLOAT_TO_FP_UNSIGNED: SASSERT(num_args == 2); st = mk_to_fp_unsigned(args[0], args[1], result); break;
case OP_FLOAT_TO_UBV: SASSERT(num_args == 2); st = mk_to_ubv(args[0], args[1], result); break;
case OP_FLOAT_TO_SBV: SASSERT(num_args == 2); st = mk_to_sbv(args[0], args[1], result); break;
case OP_FLOAT_TO_REAL: SASSERT(num_args == 1); st = mk_to_real(args[0], result); break;
}
return st;
}

@@ -504,3 +509,42 @@ br_status float_rewriter::mk_eq_core(expr * arg1, expr * arg2, expr_ref & result
br_status float_rewriter::mk_to_ieee_bv(expr * arg1, expr_ref & result) {
return BR_FAILED;
}

br_status float_rewriter::mk_fp(expr * arg1, expr * arg2, expr * arg3, expr_ref & result) {
bv_util bu(m());
rational r1, r2, r3;
unsigned bvs1, bvs2, bvs3;

if (bu.is_numeral(arg1, r1, bvs1) && bu.is_numeral(arg2, r2, bvs2) && bu.is_numeral(arg3, r3, bvs3)) {
SASSERT(m_util.fm().mpz_manager().is_one(r2.to_mpq().denominator()));
SASSERT(m_util.fm().mpz_manager().is_one(r3.to_mpq().denominator()));
SASSERT(m_util.fm().mpz_manager().is_int64(r3.to_mpq().numerator()));
scoped_mpf v(m_util.fm());
mpf_exp_t biased_exp = m_util.fm().mpz_manager().get_int64(r2.to_mpq().numerator());
m_util.fm().set(v, bvs2, bvs3 + 1,
r1.is_one(),
r3.to_mpq().numerator(),
m_util.fm().unbias_exp(bvs2, biased_exp));
TRACE("fp_rewriter", tout << "v = " << m_util.fm().to_string(v) << std::endl;);
result = m_util.mk_value(v);
return BR_DONE;
}

return BR_FAILED;
}

br_status float_rewriter::mk_to_fp_unsigned(expr * arg1, expr * arg2, expr_ref & result) {
return BR_FAILED;
}

br_status float_rewriter::mk_to_ubv(expr * arg1, expr * arg2, expr_ref & result) {
return BR_FAILED;
}

br_status float_rewriter::mk_to_sbv(expr * arg1, expr * arg2, expr_ref & result) {
return BR_FAILED;
}

br_status float_rewriter::mk_to_real(expr * arg1, expr_ref & result) {
return BR_FAILED;
}
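mk_fp above folds three bit-vector numerals (sign, biased exponent, significand) into a floating-point literal; the exponent field is stored biased, which is why fm().unbias_exp removes the bias before the value is constructed. A small sketch of that decoding for normal (non-zero, non-special) encodings under the usual IEEE interpretation; it deliberately ignores subnormals, infinities and NaN:

def decode_normal_fp(sign, biased_exp, sig, ebits, sbits):
    """Value of a *normal* IEEE-style float: (-1)^sign * 1.sig * 2^(biased_exp - bias)."""
    bias = (1 << (ebits - 1)) - 1                  # what unbias_exp removes
    exponent = biased_exp - bias
    significand = 1 + sig / (1 << (sbits - 1))     # sbits counts the hidden bit
    return (-1) ** sign * significand * 2.0 ** exponent

# 1.5 as an 8/24 (single-precision-like) float: sign 0, biased exponent 127, sig field 0b100...0
print(decode_normal_fp(0, 127, 1 << 22, 8, 24))    # 1.5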
@@ -73,6 +73,12 @@ public:
br_status mk_is_sign_minus(expr * arg1, expr_ref & result);

br_status mk_to_ieee_bv(expr * arg1, expr_ref & result);

br_status mk_fp(expr * arg1, expr * arg2, expr * arg3, expr_ref & result);
br_status mk_to_fp_unsigned(expr * arg1, expr * arg2, expr_ref & result);
br_status mk_to_ubv(expr * arg1, expr * arg2, expr_ref & result);
br_status mk_to_sbv(expr * arg1, expr * arg2, expr_ref & result);
br_status mk_to_real(expr * arg1, expr_ref & result);
};

#endif
@@ -114,7 +114,7 @@ static void get_interpolant_and_maybe_check(cmd_context & ctx, expr * t, params_

ptr_vector<expr>::const_iterator it = ctx.begin_assertions();
ptr_vector<expr>::const_iterator end = ctx.end_assertions();
ptr_vector<ast> cnsts(end - it);
ptr_vector<ast> cnsts((unsigned)(end - it));
for (int i = 0; it != end; ++it, ++i)
cnsts[i] = *it;

@@ -139,10 +139,11 @@ static void get_interpolant(cmd_context & ctx, expr * t, params_ref &m_params) {
get_interpolant_and_maybe_check(ctx,t,m_params,false);
}

#if 0
static void get_and_check_interpolant(cmd_context & ctx, params_ref &m_params, expr * t) {
get_interpolant_and_maybe_check(ctx,t,m_params,true);
}
#endif

static void compute_interpolant_and_maybe_check(cmd_context & ctx, expr * t, params_ref &m_params, bool check){
src/duality/duality.h (461 changed lines; file mode changed: Normal file → Executable file)
@ -25,7 +25,7 @@ Revision History:
|
|||
#include <map>
|
||||
|
||||
// make hash_map and hash_set available
|
||||
#ifndef WIN32
|
||||
#ifndef _WINDOWS
|
||||
using namespace stl_ext;
|
||||
#endif
|
||||
|
||||
|
@ -36,12 +36,11 @@ namespace Duality {
|
|||
struct Z3User {
|
||||
|
||||
context &ctx;
|
||||
solver &slvr;
|
||||
|
||||
typedef func_decl FuncDecl;
|
||||
typedef expr Term;
|
||||
|
||||
Z3User(context &_ctx, solver &_slvr) : ctx(_ctx), slvr(_slvr){}
|
||||
Z3User(context &_ctx) : ctx(_ctx){}
|
||||
|
||||
const char *string_of_int(int n);
|
||||
|
||||
|
@ -53,6 +52,8 @@ namespace Duality {
|
|||
|
||||
Term SubstRec(hash_map<ast, Term> &memo, const Term &t);
|
||||
|
||||
Term SubstRec(hash_map<ast, Term> &memo, hash_map<func_decl, func_decl> &map, const Term &t);
|
||||
|
||||
void Strengthen(Term &x, const Term &y);
|
||||
|
||||
// return the func_del of an app if it is uninterpreted
|
||||
|
@ -77,11 +78,42 @@ namespace Duality {
|
|||
|
||||
void Summarize(const Term &t);
|
||||
|
||||
int CumulativeDecisions();
|
||||
int CountOperators(const Term &t);
|
||||
|
||||
private:
|
||||
Term SubstAtom(hash_map<ast, Term> &memo, const expr &t, const expr &atom, const expr &val);
|
||||
|
||||
Term RemoveRedundancy(const Term &t);
|
||||
|
||||
Term IneqToEq(const Term &t);
|
||||
|
||||
bool IsLiteral(const expr &lit, expr &atom, expr &val);
|
||||
|
||||
expr Negate(const expr &f);
|
||||
|
||||
expr SimplifyAndOr(const std::vector<expr> &args, bool is_and);
|
||||
|
||||
expr ReallySimplifyAndOr(const std::vector<expr> &args, bool is_and);
|
||||
|
||||
int MaxIndex(hash_map<ast,int> &memo, const Term &t);
|
||||
|
||||
bool IsClosedFormula(const Term &t);
|
||||
|
||||
Term AdjustQuantifiers(const Term &t);
|
||||
|
||||
FuncDecl RenumberPred(const FuncDecl &f, int n);
|
||||
|
||||
protected:
|
||||
|
||||
void SummarizeRec(hash_set<ast> &memo, std::vector<expr> &lits, int &ops, const Term &t);
|
||||
int CountOperatorsRec(hash_set<ast> &memo, const Term &t);
|
||||
void RemoveRedundancyOp(bool pol, std::vector<expr> &args, hash_map<ast, Term> &smemo);
|
||||
Term RemoveRedundancyRec(hash_map<ast, Term> &memo, hash_map<ast, Term> &smemo, const Term &t);
|
||||
Term SubstAtomTriv(const expr &foo, const expr &atom, const expr &val);
|
||||
expr ReduceAndOr(const std::vector<expr> &args, bool is_and, std::vector<expr> &res);
|
||||
expr FinishAndOr(const std::vector<expr> &args, bool is_and);
|
||||
expr PullCommonFactors(std::vector<expr> &args, bool is_and);
|
||||
Term IneqToEqRec(hash_map<ast, Term> &memo, const Term &t);
|
||||
|
||||
|
||||
};
|
||||
|
||||
|
@ -142,6 +174,7 @@ namespace Duality {
|
|||
context *ctx; /** Z3 context for formulas */
|
||||
solver *slvr; /** Z3 solver */
|
||||
bool need_goals; /** Can the solver use the goal tree to optimize interpolants? */
|
||||
solver aux_solver; /** For temporary use -- don't leave assertions here. */
|
||||
|
||||
/** Tree interpolation. This method assumes the formulas in TermTree
|
||||
"assumptions" are currently asserted in the solver. The return
|
||||
|
@ -167,6 +200,9 @@ namespace Duality {
|
|||
/** Assert a background axiom. */
|
||||
virtual void assert_axiom(const expr &axiom) = 0;
|
||||
|
||||
/** Get the background axioms. */
|
||||
virtual const std::vector<expr> &get_axioms() = 0;
|
||||
|
||||
/** Return a string describing performance. */
|
||||
virtual std::string profile() = 0;
|
||||
|
||||
|
@ -178,6 +214,12 @@ namespace Duality {
|
|||
/** Cancel, throw Canceled object if possible. */
|
||||
virtual void cancel(){ }
|
||||
|
||||
/* Note: aux solver uses extensional array theory, since it
|
||||
needs to be able to produce counter-models for
|
||||
interpolants the have array equalities in them.
|
||||
*/
|
||||
LogicSolver(context &c) : aux_solver(c,true){}
|
||||
|
||||
virtual ~LogicSolver(){}
|
||||
};
|
||||
|
||||
|
@ -202,6 +244,10 @@ namespace Duality {
|
|||
islvr->AssertInterpolationAxiom(axiom);
|
||||
}
|
||||
|
||||
const std::vector<expr> &get_axioms() {
|
||||
return islvr->GetInterpolationAxioms();
|
||||
}
|
||||
|
||||
std::string profile(){
|
||||
return islvr->profile();
|
||||
}
|
||||
|
@ -215,9 +261,9 @@ namespace Duality {
|
|||
}
|
||||
#endif
|
||||
|
||||
iZ3LogicSolver(context &c){
|
||||
iZ3LogicSolver(context &c, bool models = true) : LogicSolver(c) {
|
||||
ctx = ictx = &c;
|
||||
slvr = islvr = new interpolating_solver(*ictx);
|
||||
slvr = islvr = new interpolating_solver(*ictx, models);
|
||||
need_goals = false;
|
||||
islvr->SetWeakInterpolants(true);
|
||||
}
|
||||
|
@ -267,8 +313,8 @@ namespace Duality {
|
|||
}
|
||||
|
||||
LogicSolver *ls;
|
||||
|
||||
private:
|
||||
|
||||
protected:
|
||||
int nodeCount;
|
||||
int edgeCount;
|
||||
|
||||
|
@ -277,16 +323,19 @@ namespace Duality {
|
|||
public:
|
||||
std::list<Edge *> edges;
|
||||
std::list<Node *> nodes;
|
||||
std::list<std::pair<Edge *,Term> > constraints;
|
||||
};
|
||||
|
||||
|
||||
public:
|
||||
model dualModel;
|
||||
private:
|
||||
protected:
|
||||
literals dualLabels;
|
||||
std::list<stack_entry> stack;
|
||||
std::vector<Term> axioms; // only saved here for printing purposes
|
||||
|
||||
solver &aux_solver;
|
||||
hash_set<ast> *proof_core;
|
||||
|
||||
public:
|
||||
|
||||
/** Construct an RPFP graph with a given interpolating prover context. It is allowed to
|
||||
|
@ -296,16 +345,17 @@ namespace Duality {
|
|||
inherit the axioms.
|
||||
*/
|
||||
|
||||
RPFP(LogicSolver *_ls) : Z3User(*(_ls->ctx), *(_ls->slvr)), dualModel(*(_ls->ctx))
|
||||
RPFP(LogicSolver *_ls) : Z3User(*(_ls->ctx)), dualModel(*(_ls->ctx)), aux_solver(_ls->aux_solver)
|
||||
{
|
||||
ls = _ls;
|
||||
nodeCount = 0;
|
||||
edgeCount = 0;
|
||||
stack.push_back(stack_entry());
|
||||
HornClauses = false;
|
||||
proof_core = 0;
|
||||
}
|
||||
|
||||
~RPFP();
|
||||
virtual ~RPFP();
|
||||
|
||||
/** Symbolic representation of a relational transformer */
|
||||
class Transformer
|
||||
|
@ -351,10 +401,10 @@ namespace Duality {
|
|||
bool SubsetEq(const Transformer &other){
|
||||
Term t = owner->SubstParams(other.IndParams,IndParams,other.Formula);
|
||||
expr test = Formula && !t;
|
||||
owner->slvr.push();
|
||||
owner->slvr.add(test);
|
||||
check_result res = owner->slvr.check();
|
||||
owner->slvr.pop(1);
|
||||
owner->aux_solver.push();
|
||||
owner->aux_solver.add(test);
|
||||
check_result res = owner->aux_solver.check();
|
||||
owner->aux_solver.pop(1);
|
||||
return res == unsat;
|
||||
}
|
||||
|
||||
|
@ -444,6 +494,19 @@ namespace Duality {
|
|||
return n;
|
||||
}
|
||||
|
||||
/** Delete a node. You can only do this if not connected to any edges.*/
|
||||
void DeleteNode(Node *node){
|
||||
if(node->Outgoing || !node->Incoming.empty())
|
||||
throw "cannot delete RPFP node";
|
||||
for(std::vector<Node *>::iterator it = nodes.end(), en = nodes.begin(); it != en;){
|
||||
if(*(--it) == node){
|
||||
nodes.erase(it);
|
||||
break;
|
||||
}
|
||||
}
|
||||
delete node;
|
||||
}
|
||||
|
||||
/** This class represents a hyper-edge in the RPFP graph */
|
||||
|
||||
class Edge
|
||||
|
@ -460,6 +523,7 @@ namespace Duality {
|
|||
hash_map<ast,Term> varMap;
|
||||
Edge *map;
|
||||
Term labeled;
|
||||
std::vector<Term> constraints;
|
||||
|
||||
Edge(Node *_Parent, const Transformer &_F, const std::vector<Node *> &_Children, RPFP *_owner, int _number)
|
||||
: F(_F), Parent(_Parent), Children(_Children), dual(expr(_owner->ctx)) {
|
||||
|
@ -480,6 +544,29 @@ namespace Duality {
|
|||
return e;
|
||||
}
|
||||
|
||||
|
||||
/** Delete a hyper-edge and unlink it from any nodes. */
|
||||
void DeleteEdge(Edge *edge){
|
||||
if(edge->Parent)
|
||||
edge->Parent->Outgoing = 0;
|
||||
for(unsigned int i = 0; i < edge->Children.size(); i++){
|
||||
std::vector<Edge *> &ic = edge->Children[i]->Incoming;
|
||||
for(std::vector<Edge *>::iterator it = ic.begin(), en = ic.end(); it != en; ++it){
|
||||
if(*it == edge){
|
||||
ic.erase(it);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
for(std::vector<Edge *>::iterator it = edges.end(), en = edges.begin(); it != en;){
|
||||
if(*(--it) == edge){
|
||||
edges.erase(it);
|
||||
break;
|
||||
}
|
||||
}
|
||||
delete edge;
|
||||
}
|
||||
|
||||
/** Create an edge that lower-bounds its parent. */
|
||||
Edge *CreateLowerBoundEdge(Node *_Parent)
|
||||
{
|
||||
|
@ -492,15 +579,28 @@ namespace Duality {
|
|||
* you must pop the context accordingly. The second argument is
|
||||
* the number of pushes we are inside. */
|
||||
|
||||
void AssertEdge(Edge *e, int persist = 0, bool with_children = false, bool underapprox = false);
|
||||
virtual void AssertEdge(Edge *e, int persist = 0, bool with_children = false, bool underapprox = false);
|
||||
|
||||
/* Constrain an edge by the annotation of one of its children. */
|
||||
|
||||
void ConstrainParent(Edge *parent, Node *child);
|
||||
|
||||
|
||||
/** For incremental solving, asserts the negation of the upper bound associated
|
||||
* with a node.
|
||||
* */
|
||||
|
||||
void AssertNode(Node *n);
|
||||
|
||||
/** Assert a constraint on an edge in the SMT context.
|
||||
*/
|
||||
void ConstrainEdge(Edge *e, const Term &t);
|
||||
|
||||
/** Fix the truth values of atomic propositions in the given
|
||||
edge to their values in the current assignment. */
|
||||
void FixCurrentState(Edge *root);
|
||||
|
||||
void FixCurrentStateFull(Edge *edge, const expr &extra);
|
||||
|
||||
/** Declare a constant in the background theory. */
|
||||
|
||||
void DeclareConstant(const FuncDecl &f);
|
||||
|
@ -554,9 +654,13 @@ namespace Duality {
|
|||
|
||||
lbool Solve(Node *root, int persist);
|
||||
|
||||
/** Same as Solve, but annotates only a single node. */
|
||||
|
||||
lbool SolveSingleNode(Node *root, Node *node);
|
||||
|
||||
/** Get the constraint tree (but don't solve it) */
|
||||
|
||||
TermTree *GetConstraintTree(Node *root);
|
||||
TermTree *GetConstraintTree(Node *root, Node *skip_descendant = 0);
|
||||
|
||||
/** Dispose of the dual model (counterexample) if there is one. */
|
||||
|
||||
|
@ -592,6 +696,13 @@ namespace Duality {
|
|||
|
||||
Term ComputeUnderapprox(Node *root, int persist);
|
||||
|
||||
/** Try to strengthen the annotation of a node by removing disjuncts. */
|
||||
void Generalize(Node *root, Node *node);
|
||||
|
||||
|
||||
/** Compute disjunctive interpolant for node by case splitting */
|
||||
void InterpolateByCases(Node *root, Node *node);
|
||||
|
||||
/** Push a scope. Assertions made after Push can be undone by Pop. */
|
||||
|
||||
void Push();
|
||||
|
@ -623,6 +734,16 @@ namespace Duality {
|
|||
/** Pop a scope (see Push). Note, you cannot pop axioms. */
|
||||
|
||||
void Pop(int num_scopes);
|
||||
|
||||
/** Erase the proof by performing a Pop, Push and re-assertion of
|
||||
all the popped constraints */
|
||||
void PopPush();
|
||||
|
||||
/** Return true if the given edge is used in the proof of unsat.
|
||||
Can be called only after Solve or Check returns an unsat result. */
|
||||
|
||||
bool EdgeUsedInProof(Edge *edge);
|
||||
|
||||
|
||||
/** Convert a collection of clauses to Nodes and Edges in the RPFP.
|
||||
|
||||
|
@ -661,7 +782,7 @@ namespace Duality {
|
|||
};
|
||||
|
||||
|
||||
#ifdef WIN32
|
||||
#ifdef _WINDOWS
|
||||
__declspec(dllexport)
|
||||
#endif
|
||||
void FromClauses(const std::vector<Term> &clauses);
|
||||
|
@ -692,9 +813,31 @@ namespace Duality {
|
|||
/** Edges of the graph. */
|
||||
std::vector<Edge *> edges;
|
||||
|
||||
/** Fuse a vector of transformers. If the total number of inputs of the transformers
|
||||
is N, then the result is an N-ary transfomer whose output is the union of
|
||||
the outputs of the given transformers. The is, suppose we have a vetor of transfoermers
|
||||
{T_i(r_i1,...,r_iN(i) : i=1..M}. The the result is a transformer
|
||||
|
||||
F(r_11,...,r_iN(1),...,r_M1,...,r_MN(M)) =
|
||||
T_1(r_11,...,r_iN(1)) U ... U T_M(r_M1,...,r_MN(M))
|
||||
*/
|
||||
|
||||
Transformer Fuse(const std::vector<Transformer *> &trs);
|
||||
|
||||
/** Fuse edges so that each node is the output of at most one edge. This
|
||||
transformation is solution-preserving, but changes the numbering of edges in
|
||||
counterexamples.
|
||||
*/
|
||||
void FuseEdges();
|
||||
|
||||
void RemoveDeadNodes();
|
||||
|
||||
Term SubstParams(const std::vector<Term> &from,
|
||||
const std::vector<Term> &to, const Term &t);
|
||||
|
||||
Term SubstParamsNoCapture(const std::vector<Term> &from,
|
||||
const std::vector<Term> &to, const Term &t);
|
||||
|
||||
Term Localize(Edge *e, const Term &t);
|
||||
|
||||
void EvalNodeAsConstraint(Node *p, Transformer &res);
|
||||
|
@ -707,8 +850,25 @@ namespace Duality {
|
|||
|
||||
// int GetLabelsRec(hash_map<ast,int> *memo, const Term &f, std::vector<symbol> &labels, bool labpos);
|
||||
|
||||
private:
|
||||
/** Compute and save the proof core for future calls to
|
||||
EdgeUsedInProof. You only need to call this if you will pop
|
||||
the solver before calling EdgeUsedInProof.
|
||||
*/
|
||||
void ComputeProofCore();
|
||||
|
||||
int CumulativeDecisions();
|
||||
|
||||
solver &slvr(){
|
||||
return *ls->slvr;
|
||||
}
|
||||
|
||||
protected:
|
||||
|
||||
void ClearProofCore(){
|
||||
if(proof_core)
|
||||
delete proof_core;
|
||||
proof_core = 0;
|
||||
}
|
||||
|
||||
Term SuffixVariable(const Term &t, int n);
|
||||
|
||||
|
@ -724,10 +884,14 @@ namespace Duality {
|
|||
|
||||
Term ReducedDualEdge(Edge *e);
|
||||
|
||||
TermTree *ToTermTree(Node *root);
|
||||
TermTree *ToTermTree(Node *root, Node *skip_descendant = 0);
|
||||
|
||||
TermTree *ToGoalTree(Node *root);
|
||||
|
||||
void CollapseTermTreeRec(TermTree *root, TermTree *node);
|
||||
|
||||
TermTree *CollapseTermTree(TermTree *node);
|
||||
|
||||
void DecodeTree(Node *root, TermTree *interp, int persist);
|
||||
|
||||
Term GetUpperBound(Node *n);
|
||||
|
@ -777,6 +941,11 @@ namespace Duality {
|
|||
|
||||
Term UnderapproxFormula(const Term &f, hash_set<ast> &dont_cares);
|
||||
|
||||
void ImplicantFullRed(hash_map<ast,int> &memo, const Term &f, std::vector<Term> &lits,
|
||||
hash_set<ast> &done, hash_set<ast> &dont_cares);
|
||||
|
||||
Term UnderapproxFullFormula(const Term &f, hash_set<ast> &dont_cares);
|
||||
|
||||
Term ToRuleRec(Edge *e, hash_map<ast,Term> &memo, const Term &t, std::vector<expr> &quants);
|
||||
|
||||
hash_map<ast,Term> resolve_ite_memo;
|
||||
|
@ -803,10 +972,80 @@ namespace Duality {
|
|||
|
||||
Term SubstBound(hash_map<int,Term> &subst, const Term &t);
|
||||
|
||||
void ConstrainEdgeLocalized(Edge *e, const Term &t);
|
||||
|
||||
void GreedyReduce(solver &s, std::vector<expr> &conjuncts);
|
||||
|
||||
void NegateLits(std::vector<expr> &lits);
|
||||
|
||||
expr SimplifyOr(std::vector<expr> &lits);
|
||||
|
||||
expr SimplifyAnd(std::vector<expr> &lits);
|
||||
|
||||
void SetAnnotation(Node *root, const expr &t);
|
||||
|
||||
void AddEdgeToSolver(Edge *edge);
|
||||
|
||||
void AddToProofCore(hash_set<ast> &core);
|
||||
|
||||
void GetGroundLitsUnderQuants(hash_set<ast> *memo, const Term &f, std::vector<Term> &res, int under);
|
||||
|
||||
Term StrengthenFormulaByCaseSplitting(const Term &f, std::vector<expr> &case_lits);
|
||||
|
||||
expr NegateLit(const expr &f);
|
||||
|
||||
expr GetEdgeFormula(Edge *e, int persist, bool with_children, bool underapprox);
|
||||
|
||||
bool IsVar(const expr &t);
|
||||
|
||||
void GetVarsRec(hash_set<ast> &memo, const expr &cnst, std::vector<expr> &vars);
|
||||
|
||||
expr UnhoistPullRec(hash_map<ast,expr> & memo, const expr &w, hash_map<ast,expr> & init_defs, hash_map<ast,expr> & const_params, hash_map<ast,expr> &const_params_inv, std::vector<expr> &new_params);
|
||||
|
||||
void AddParamsToTransformer(Transformer &trans, const std::vector<expr> ¶ms);
|
||||
|
||||
expr AddParamsToApp(const expr &app, const func_decl &new_decl, const std::vector<expr> ¶ms);
|
||||
|
||||
expr GetRelRec(hash_set<ast> &memo, const expr &t, const func_decl &rel);
|
||||
|
||||
expr GetRel(Edge *edge, int child_idx);
|
||||
|
||||
void GetDefs(const expr &cnst, hash_map<ast,expr> &defs);
|
||||
|
||||
void GetDefsRec(const expr &cnst, hash_map<ast,expr> &defs);
|
||||
|
||||
void AddParamsToNode(Node *node, const std::vector<expr> ¶ms);
|
||||
|
||||
void UnhoistLoop(Edge *loop_edge, Edge *init_edge);
|
||||
|
||||
void Unhoist();
|
||||
|
||||
Term ElimIteRec(hash_map<ast,expr> &memo, const Term &t, std::vector<expr> &cnsts);
|
||||
|
||||
Term ElimIte(const Term &t);
|
||||
|
||||
void MarkLiveNodes(hash_map<Node *,std::vector<Edge *> > &outgoing, hash_set<Node *> &live_nodes, Node *node);
|
||||
|
||||
virtual void slvr_add(const expr &e);
|
||||
|
||||
virtual void slvr_pop(int i);
|
||||
|
||||
virtual void slvr_push();
|
||||
|
||||
virtual check_result slvr_check(unsigned n = 0, expr * const assumptions = 0, unsigned *core_size = 0, expr *core = 0);
|
||||
|
||||
virtual lbool ls_interpolate_tree(TermTree *assumptions,
|
||||
TermTree *&interpolants,
|
||||
model &_model,
|
||||
TermTree *goals = 0,
|
||||
bool weak = false);
|
||||
|
||||
virtual bool proof_core_contains(const expr &e);
|
||||
|
||||
};
|
||||
|
||||
/** RPFP solver base class. */
|
||||
|
||||
/** RPFP solver base class. */
|
||||
|
||||
class Solver {
|
||||
|
||||
|
@ -850,5 +1089,179 @@ namespace Duality {
|
|||
/** Object thrown on cancellation */
|
||||
struct Canceled {};
|
||||
|
||||
/** Object thrown on incompleteness */
|
||||
struct Incompleteness {};
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
// Allow to hash on nodes and edges in deterministic way
|
||||
|
||||
namespace hash_space {
|
||||
template <>
|
||||
class hash<Duality::RPFP::Node *> {
|
||||
public:
|
||||
size_t operator()(const Duality::RPFP::Node *p) const {
|
||||
return p->number;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
namespace hash_space {
|
||||
template <>
|
||||
class hash<Duality::RPFP::Edge *> {
|
||||
public:
|
||||
size_t operator()(const Duality::RPFP::Edge *p) const {
|
||||
return p->number;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// allow to walk sets of nodes without address dependency
|
||||
|
||||
namespace std {
|
||||
template <>
|
||||
class less<Duality::RPFP::Node *> {
|
||||
public:
|
||||
bool operator()(Duality::RPFP::Node * const &s, Duality::RPFP::Node * const &t) const {
|
||||
return s->number < t->number; // s.raw()->get_id() < t.raw()->get_id();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// #define LIMIT_STACK_WEIGHT 5
|
||||
|
||||
|
||||
namespace Duality {
|
||||
/** Caching version of RPFP. Instead of asserting constraints, returns assumption literals */
|
||||
|
||||
class RPFP_caching : public RPFP {
|
||||
public:
|
||||
|
||||
/** appends assumption literals for edge to lits. if with_children is true,
|
||||
includes that annotation of the edge's children.
|
||||
*/
|
||||
void AssertEdgeCache(Edge *e, std::vector<Term> &lits, bool with_children = false);
|
||||
|
||||
/** appends assumption literals for node to lits */
|
||||
void AssertNodeCache(Node *, std::vector<Term> lits);
|
||||
|
||||
/** check assumption lits, and return core */
|
||||
check_result CheckCore(const std::vector<Term> &assumps, std::vector<Term> &core);
|
||||
|
||||
/** Clone another RPFP into this one, keeping a map */
|
||||
void Clone(RPFP *other);
|
||||
|
||||
/** Get the clone of a node */
|
||||
Node *GetNodeClone(Node *other_node);
|
||||
|
||||
/** Get the clone of an edge */
|
||||
Edge *GetEdgeClone(Edge *other_edge);
|
||||
|
||||
/** Try to strengthen the parent of an edge */
|
||||
void GeneralizeCache(Edge *edge);
|
||||
|
||||
/** Try to propagate some facts from children to parents of edge.
|
||||
Return true if success. */
|
||||
bool PropagateCache(Edge *edge);
|
||||
|
||||
/** Construct a caching RPFP using a LogicSolver */
|
||||
RPFP_caching(LogicSolver *_ls) : RPFP(_ls) {}
|
||||
|
||||
/** Constraint an edge by its child's annotation. Return
|
||||
assumption lits. */
|
||||
void ConstrainParentCache(Edge *parent, Node *child, std::vector<Term> &lits);
|
||||
|
||||
#ifdef LIMIT_STACK_WEIGHT
|
||||
virtual void AssertEdge(Edge *e, int persist = 0, bool with_children = false, bool underapprox = false);
|
||||
#endif
|
||||
|
||||
virtual ~RPFP_caching(){}
|
||||
|
||||
protected:
|
||||
hash_map<ast,expr> AssumptionLits;
|
||||
hash_map<Node *, Node *> NodeCloneMap;
|
||||
hash_map<Edge *, Edge *> EdgeCloneMap;
|
||||
std::vector<expr> alit_stack;
|
||||
std::vector<unsigned> alit_stack_sizes;
|
||||
|
||||
// to let us use one solver per edge
|
||||
struct edge_solver {
|
||||
hash_map<ast,expr> AssumptionLits;
|
||||
uptr<solver> slvr;
|
||||
};
|
||||
hash_map<Edge *, edge_solver > edge_solvers;
|
||||
|
||||
#ifdef LIMIT_STACK_WEIGHT
|
||||
struct weight_counter {
|
||||
int val;
|
||||
weight_counter(){val = 0;}
|
||||
void swap(weight_counter &other){
|
||||
std::swap(val,other.val);
|
||||
}
|
||||
};
|
||||
|
||||
struct big_stack_entry {
|
||||
weight_counter weight_added;
|
||||
std::vector<expr> new_alits;
|
||||
std::vector<expr> alit_stack;
|
||||
std::vector<unsigned> alit_stack_sizes;
|
||||
};
|
||||
|
||||
std::vector<expr> new_alits;
|
||||
weight_counter weight_added;
|
||||
std::vector<big_stack_entry> big_stack;
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
void GetAssumptionLits(const expr &fmla, std::vector<expr> &lits, hash_map<ast,expr> *opt_map = 0);
|
||||
|
||||
void GreedyReduceCache(std::vector<expr> &assumps, std::vector<expr> &core);
|
||||
|
||||
void FilterCore(std::vector<expr> &core, std::vector<expr> &full_core);
|
||||
void ConstrainEdgeLocalizedCache(Edge *e, const Term &tl, std::vector<expr> &lits);
|
||||
|
||||
virtual void slvr_add(const expr &e);
|
||||
|
||||
virtual void slvr_pop(int i);
|
||||
|
||||
virtual void slvr_push();
|
||||
|
||||
virtual check_result slvr_check(unsigned n = 0, expr * const assumptions = 0, unsigned *core_size = 0, expr *core = 0);
|
||||
|
||||
virtual lbool ls_interpolate_tree(TermTree *assumptions,
|
||||
TermTree *&interpolants,
|
||||
model &_model,
|
||||
TermTree *goals = 0,
|
||||
bool weak = false);
|
||||
|
||||
virtual bool proof_core_contains(const expr &e);
|
||||
|
||||
void GetTermTreeAssertionLiterals(TermTree *assumptions);
|
||||
|
||||
void GetTermTreeAssertionLiteralsRec(TermTree *assumptions);
|
||||
|
||||
edge_solver &SolverForEdge(Edge *edge, bool models, bool axioms);
|
||||
|
||||
public:
|
||||
struct scoped_solver_for_edge {
|
||||
solver *orig_slvr;
|
||||
RPFP_caching *rpfp;
|
||||
edge_solver *es;
|
||||
scoped_solver_for_edge(RPFP_caching *_rpfp, Edge *edge, bool models = false, bool axioms = false){
|
||||
rpfp = _rpfp;
|
||||
orig_slvr = rpfp->ls->slvr;
|
||||
es = &(rpfp->SolverForEdge(edge,models,axioms));
|
||||
rpfp->ls->slvr = es->slvr.get();
|
||||
rpfp->AssumptionLits.swap(es->AssumptionLits);
|
||||
}
|
||||
~scoped_solver_for_edge(){
|
||||
rpfp->ls->slvr = orig_slvr;
|
||||
rpfp->AssumptionLits.swap(es->AssumptionLits);
|
||||
}
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@@ -25,7 +25,14 @@ Revision History:
#include <string.h>
#include <stdlib.h>

#ifdef WIN32
#pragma warning(disable:4996)
#pragma warning(disable:4800)
#pragma warning(disable:4267)
#endif

#include "duality_wrapper.h"
#include "iz3profiling.h"

namespace Duality {

@@ -103,6 +110,7 @@ namespace Duality {
output_time(*pfs, it->second.t);
(*pfs) << std::endl;
}
profiling::print(os); // print the interpolation stats
}

void timer_start(const char *name){
src/duality/duality_rpfp.cpp (1924 changed lines; file mode changed: Normal file → Executable file): file diff suppressed because it is too large.

src/duality/duality_solver.cpp (595 changed lines; file mode changed: Normal file → Executable file)
|
@ -19,6 +19,12 @@ Revision History:
|
|||
|
||||
--*/
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable:4996)
|
||||
#pragma warning(disable:4800)
|
||||
#pragma warning(disable:4267)
|
||||
#endif
|
||||
|
||||
#include "duality.h"
|
||||
#include "duality_profiling.h"
|
||||
|
||||
|
@ -26,6 +32,7 @@ Revision History:
|
|||
#include <set>
|
||||
#include <map>
|
||||
#include <list>
|
||||
#include <iterator>
|
||||
|
||||
// TODO: make these official options or get rid of them
|
||||
|
||||
|
@ -37,14 +44,23 @@ Revision History:
|
|||
#define MINIMIZE_CANDIDATES
|
||||
// #define MINIMIZE_CANDIDATES_HARDER
|
||||
#define BOUNDED
|
||||
#define CHECK_CANDS_FROM_IND_SET
|
||||
// #define CHECK_CANDS_FROM_IND_SET
|
||||
#define UNDERAPPROX_NODES
|
||||
#define NEW_EXPAND
|
||||
#define EARLY_EXPAND
|
||||
// #define TOP_DOWN
|
||||
// #define EFFORT_BOUNDED_STRAT
|
||||
#define SKIP_UNDERAPPROX_NODES
|
||||
// #define KEEP_EXPANSIONS
|
||||
// #define USE_CACHING_RPFP
|
||||
// #define PROPAGATE_BEFORE_CHECK
|
||||
|
||||
#define USE_RPFP_CLONE
|
||||
#define USE_NEW_GEN_CANDS
|
||||
|
||||
//#define NO_PROPAGATE
|
||||
//#define NO_GENERALIZE
|
||||
//#define NO_DECISIONS
|
||||
|
||||
namespace Duality {
|
||||
|
||||
|
@ -101,7 +117,7 @@ namespace Duality {
|
|||
public:
|
||||
Duality(RPFP *_rpfp)
|
||||
: ctx(_rpfp->ctx),
|
||||
slvr(_rpfp->slvr),
|
||||
slvr(_rpfp->slvr()),
|
||||
nodes(_rpfp->nodes),
|
||||
edges(_rpfp->edges)
|
||||
{
|
||||
|
@ -115,8 +131,36 @@ namespace Duality {
|
|||
Report = false;
|
||||
StratifiedInlining = false;
|
||||
RecursionBound = -1;
|
||||
{
|
||||
scoped_no_proof no_proofs_please(ctx.m());
|
||||
#ifdef USE_RPFP_CLONE
|
||||
clone_rpfp = new RPFP_caching(rpfp->ls);
|
||||
clone_rpfp->Clone(rpfp);
|
||||
#endif
|
||||
#ifdef USE_NEW_GEN_CANDS
|
||||
gen_cands_rpfp = new RPFP_caching(rpfp->ls);
|
||||
gen_cands_rpfp->Clone(rpfp);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
~Duality(){
|
||||
#ifdef USE_RPFP_CLONE
|
||||
delete clone_rpfp;
|
||||
#endif
|
||||
#ifdef USE_NEW_GEN_CANDS
|
||||
delete gen_cands_rpfp;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef USE_RPFP_CLONE
|
||||
RPFP_caching *clone_rpfp;
|
||||
#endif
|
||||
#ifdef USE_NEW_GEN_CANDS
|
||||
RPFP_caching *gen_cands_rpfp;
|
||||
#endif
|
||||
|
||||
|
||||
typedef RPFP::Node Node;
|
||||
typedef RPFP::Edge Edge;
|
||||
|
||||
|
@ -184,7 +228,7 @@ namespace Duality {
|
|||
best.insert(*it);
|
||||
}
|
||||
#else
|
||||
virtual void ChooseExpand(const std::set<RPFP::Node *> &choices, std::set<RPFP::Node *> &best, bool high_priority=false){
|
||||
virtual void ChooseExpand(const std::set<RPFP::Node *> &choices, std::set<RPFP::Node *> &best, bool high_priority=false, bool best_only=false){
|
||||
if(high_priority) return;
|
||||
int best_score = INT_MAX;
|
||||
int worst_score = 0;
|
||||
|
@ -194,13 +238,13 @@ namespace Duality {
|
|||
best_score = std::min(best_score,score);
|
||||
worst_score = std::max(worst_score,score);
|
||||
}
|
||||
int cutoff = best_score + (worst_score-best_score)/2;
|
||||
int cutoff = best_only ? best_score : (best_score + (worst_score-best_score)/2);
|
||||
for(std::set<Node *>::iterator it = choices.begin(), en = choices.end(); it != en; ++it)
|
||||
if(scores[(*it)->map].updates <= cutoff)
|
||||
best.insert(*it);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
/** Called when done expanding a tree */
|
||||
virtual void Done() {}
|
||||
};
|
||||
|
@ -804,8 +848,10 @@ namespace Duality {
|
|||
Node *child = chs[i];
|
||||
if(TopoSort[child] < TopoSort[node->map]){
|
||||
Node *leaf = LeafMap[child];
|
||||
if(!indset->Contains(leaf))
|
||||
if(!indset->Contains(leaf)){
|
||||
node->Outgoing->F.Formula = ctx.bool_val(false); // make this a proper leaf, else bogus cex
|
||||
return node->Outgoing;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1085,7 +1131,8 @@ namespace Duality {
|
|||
void ExtractCandidateFromCex(Edge *edge, RPFP *checker, Node *root, Candidate &candidate){
|
||||
candidate.edge = edge;
|
||||
for(unsigned j = 0; j < edge->Children.size(); j++){
|
||||
Edge *lb = root->Outgoing->Children[j]->Outgoing;
|
||||
Node *node = root->Outgoing->Children[j];
|
||||
Edge *lb = node->Outgoing;
|
||||
std::vector<Node *> &insts = insts_of_node[edge->Children[j]];
|
||||
#ifndef MINIMIZE_CANDIDATES
|
||||
for(int k = insts.size()-1; k >= 0; k--)
|
||||
|
@ -1095,8 +1142,8 @@ namespace Duality {
|
|||
{
|
||||
Node *inst = insts[k];
|
||||
if(indset->Contains(inst)){
|
||||
if(checker->Empty(lb->Parent) ||
|
||||
eq(checker->Eval(lb,NodeMarker(inst)),ctx.bool_val(true))){
|
||||
if(checker->Empty(node) ||
|
||||
eq(lb ? checker->Eval(lb,NodeMarker(inst)) : checker->dualModel.eval(NodeMarker(inst)),ctx.bool_val(true))){
|
||||
candidate.Children.push_back(inst);
|
||||
goto next_child;
|
||||
}
|
||||
|
@ -1166,6 +1213,25 @@ namespace Duality {
|
|||
#endif
|
||||
|
||||
|
||||
Node *CheckerForEdgeClone(Edge *edge, RPFP_caching *checker){
|
||||
Edge *gen_cands_edge = checker->GetEdgeClone(edge);
|
||||
Node *root = gen_cands_edge->Parent;
|
||||
root->Outgoing = gen_cands_edge;
|
||||
GenNodeSolutionFromIndSet(edge->Parent, root->Bound);
|
||||
#if 0
|
||||
if(root->Bound.IsFull())
|
||||
return 0;
|
||||
#endif
|
||||
checker->AssertNode(root);
|
||||
for(unsigned j = 0; j < edge->Children.size(); j++){
|
||||
Node *oc = edge->Children[j];
|
||||
Node *nc = gen_cands_edge->Children[j];
|
||||
GenNodeSolutionWithMarkers(oc,nc->Annotation,true);
|
||||
}
|
||||
checker->AssertEdge(gen_cands_edge,1,true);
|
||||
return root;
|
||||
}
|
||||
|
||||
/** If the current proposed solution is not inductive,
|
||||
use the induction failure to generate candidates for extension. */
|
||||
void GenCandidatesFromInductionFailure(bool full_scan = false){
|
||||
|
@ -1175,6 +1241,7 @@ namespace Duality {
|
|||
Edge *edge = edges[i];
|
||||
if(!full_scan && updated_nodes.find(edge->Parent) == updated_nodes.end())
|
||||
continue;
|
||||
#ifndef USE_NEW_GEN_CANDS
|
||||
slvr.push();
|
||||
RPFP *checker = new RPFP(rpfp->ls);
|
||||
Node *root = CheckerForEdge(edge,checker);
|
||||
|
@ -1186,6 +1253,18 @@ namespace Duality {
|
|||
}
|
||||
slvr.pop(1);
|
||||
delete checker;
|
||||
#else
|
||||
RPFP_caching::scoped_solver_for_edge ssfe(gen_cands_rpfp,edge,true /* models */, true /*axioms*/);
|
||||
gen_cands_rpfp->Push();
|
||||
Node *root = CheckerForEdgeClone(edge,gen_cands_rpfp);
|
||||
if(gen_cands_rpfp->Check(root) != unsat){
|
||||
Candidate candidate;
|
||||
ExtractCandidateFromCex(edge,gen_cands_rpfp,root,candidate);
|
||||
reporter->InductionFailure(edge,candidate.Children);
|
||||
candidates.push_back(candidate);
|
||||
}
|
||||
gen_cands_rpfp->Pop(1);
|
||||
#endif
|
||||
}
|
||||
updated_nodes.clear();
|
||||
timer_stop("GenCandIndFail");
|
||||
|
@ -1270,18 +1349,24 @@ namespace Duality {
|
|||
}
|
||||
}
|
||||
|
||||
bool UpdateNodeToNode(Node *node, Node *top){
|
||||
if(!node->Annotation.SubsetEq(top->Annotation)){
|
||||
reporter->Update(node,top->Annotation);
|
||||
indset->Update(node,top->Annotation);
|
||||
updated_nodes.insert(node->map);
|
||||
node->Annotation.IntersectWith(top->Annotation);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/** Update the unwinding solution, using an interpolant for the
|
||||
derivation tree. */
|
||||
void UpdateWithInterpolant(Node *node, RPFP *tree, Node *top){
|
||||
if(top->Outgoing)
|
||||
for(unsigned i = 0; i < top->Outgoing->Children.size(); i++)
|
||||
UpdateWithInterpolant(node->Outgoing->Children[i],tree,top->Outgoing->Children[i]);
|
||||
if(!node->Annotation.SubsetEq(top->Annotation)){
|
||||
reporter->Update(node,top->Annotation);
|
||||
indset->Update(node,top->Annotation);
|
||||
updated_nodes.insert(node->map);
|
||||
node->Annotation.IntersectWith(top->Annotation);
|
||||
}
|
||||
UpdateNodeToNode(node, top);
|
||||
heuristic->Update(node);
|
||||
}
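A small illustrative sketch (assumed names, not the commit's code) of the update pattern that UpdateNodeToNode factors out above: strengthen a node's annotation only when the candidate annotation adds something new, and report whether anything changed so the caller can record the node as updated. Annotations are modeled here as plain sets of conjuncts, a simplification of the real RPFP annotations.

#include <algorithm>
#include <set>
#include <string>

using Annotation = std::set<std::string>;   // simplified stand-in: a conjunction of facts

// Returns true iff 'node' actually changed (mirrors the bool result of UpdateNodeToNode).
bool update_node_to_node(Annotation &node, const Annotation &top) {
    // If every conjunct of 'top' already holds at 'node', there is nothing to learn.
    if (std::includes(node.begin(), node.end(), top.begin(), top.end()))
        return false;
    // Conjoining the two annotations corresponds to intersecting their model sets.
    node.insert(top.begin(), top.end());
    return true;
}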
|
||||
|
||||
|
@ -1303,9 +1388,13 @@ namespace Duality {
|
|||
node. */
|
||||
bool SatisfyUpperBound(Node *node){
|
||||
if(node->Bound.IsFull()) return true;
|
||||
#ifdef PROPAGATE_BEFORE_CHECK
|
||||
Propagate();
|
||||
#endif
|
||||
reporter->Bound(node);
|
||||
int start_decs = rpfp->CumulativeDecisions();
|
||||
DerivationTree dt(this,unwinding,reporter,heuristic,FullExpand);
|
||||
DerivationTree *dtp = new DerivationTreeSlow(this,unwinding,reporter,heuristic,FullExpand);
|
||||
DerivationTree &dt = *dtp;
|
||||
bool res = dt.Derive(unwinding,node,UseUnderapprox);
|
||||
int end_decs = rpfp->CumulativeDecisions();
|
||||
// std::cout << "decisions: " << (end_decs - start_decs) << std::endl;
|
||||
|
@ -1321,6 +1410,7 @@ namespace Duality {
|
|||
UpdateWithInterpolant(node,dt.tree,dt.top);
|
||||
delete dt.tree;
|
||||
}
|
||||
delete dtp;
|
||||
return !res;
|
||||
}
|
||||
|
||||
|
@ -1404,13 +1494,77 @@ namespace Duality {
|
|||
}
|
||||
}
|
||||
|
||||
// Propagate conjuncts up the unwinding
|
||||
void Propagate(){
|
||||
reporter->Message("beginning propagation");
|
||||
timer_start("Propagate");
|
||||
std::vector<Node *> sorted_nodes = unwinding->nodes;
|
||||
std::sort(sorted_nodes.begin(),sorted_nodes.end(),std::less<Node *>()); // sorts by sequence number
|
||||
hash_map<Node *,std::set<expr> > facts;
|
||||
for(unsigned i = 0; i < sorted_nodes.size(); i++){
|
||||
Node *node = sorted_nodes[i];
|
||||
std::set<expr> &node_facts = facts[node->map];
|
||||
if(!(node->Outgoing && indset->Contains(node)))
|
||||
continue;
|
||||
std::vector<expr> conj_vec;
|
||||
unwinding->CollectConjuncts(node->Annotation.Formula,conj_vec);
|
||||
std::set<expr> conjs;
|
||||
std::copy(conj_vec.begin(),conj_vec.end(),std::inserter(conjs,conjs.begin()));
|
||||
if(!node_facts.empty()){
|
||||
RPFP *checker = new RPFP(rpfp->ls);
|
||||
slvr.push();
|
||||
Node *root = checker->CloneNode(node);
|
||||
Edge *edge = node->Outgoing;
|
||||
// checker->AssertNode(root);
|
||||
std::vector<Node *> cs;
|
||||
for(unsigned j = 0; j < edge->Children.size(); j++){
|
||||
Node *oc = edge->Children[j];
|
||||
Node *nc = checker->CloneNode(oc);
|
||||
nc->Annotation = oc->Annotation; // is this needed?
|
||||
cs.push_back(nc);
|
||||
}
|
||||
Edge *checker_edge = checker->CreateEdge(root,edge->F,cs);
|
||||
checker->AssertEdge(checker_edge, 0, true, false);
|
||||
std::vector<expr> propagated;
|
||||
for(std::set<expr> ::iterator it = node_facts.begin(), en = node_facts.end(); it != en;){
|
||||
const expr &fact = *it;
|
||||
if(conjs.find(fact) == conjs.end()){
|
||||
root->Bound.Formula = fact;
|
||||
slvr.push();
|
||||
checker->AssertNode(root);
|
||||
check_result res = checker->Check(root);
|
||||
slvr.pop();
|
||||
if(res != unsat){
|
||||
std::set<expr> ::iterator victim = it;
|
||||
++it;
|
||||
node_facts.erase(victim); // if it ain't true, nix it
|
||||
continue;
|
||||
}
|
||||
propagated.push_back(fact);
|
||||
}
|
||||
++it;
|
||||
}
|
||||
slvr.pop();
|
||||
for(unsigned i = 0; i < propagated.size(); i++){
|
||||
root->Annotation.Formula = propagated[i];
|
||||
UpdateNodeToNode(node,root);
|
||||
}
|
||||
delete checker;
|
||||
}
|
||||
for(std::set<expr> ::iterator it = conjs.begin(), en = conjs.end(); it != en; ++it){
|
||||
expr foo = *it;
|
||||
node_facts.insert(foo);
|
||||
}
|
||||
}
|
||||
timer_stop("Propagate");
|
||||
}
|
||||
|
||||
/** This class represents a derivation tree. */
|
||||
class DerivationTree {
|
||||
public:
|
||||
|
||||
DerivationTree(Duality *_duality, RPFP *rpfp, Reporter *_reporter, Heuristic *_heuristic, bool _full_expand)
|
||||
: slvr(rpfp->slvr),
|
||||
: slvr(rpfp->slvr()),
|
||||
ctx(rpfp->ctx)
|
||||
{
|
||||
duality = _duality;
|
||||
|
@ -1454,7 +1608,13 @@ namespace Duality {
|
|||
constrained = _constrained;
|
||||
false_approx = true;
|
||||
timer_start("Derive");
|
||||
#ifndef USE_CACHING_RPFP
|
||||
tree = _tree ? _tree : new RPFP(rpfp->ls);
|
||||
#else
|
||||
RPFP::LogicSolver *cache_ls = new RPFP::iZ3LogicSolver(ctx);
|
||||
cache_ls->slvr->push();
|
||||
tree = _tree ? _tree : new RPFP_caching(cache_ls);
|
||||
#endif
|
||||
tree->HornClauses = rpfp->HornClauses;
|
||||
tree->Push(); // so we can clear out the solver later when finished
|
||||
top = CreateApproximatedInstance(root);
|
||||
|
@ -1466,19 +1626,28 @@ namespace Duality {
|
|||
timer_start("Pop");
|
||||
tree->Pop(1);
|
||||
timer_stop("Pop");
|
||||
#ifdef USE_CACHING_RPFP
|
||||
cache_ls->slvr->pop(1);
|
||||
delete cache_ls;
|
||||
tree->ls = rpfp->ls;
|
||||
#endif
|
||||
timer_stop("Derive");
|
||||
return res;
|
||||
}
|
||||
|
||||
#define WITH_CHILDREN
|
||||
|
||||
Node *CreateApproximatedInstance(RPFP::Node *from){
|
||||
Node *to = tree->CloneNode(from);
|
||||
to->Annotation = from->Annotation;
|
||||
void InitializeApproximatedInstance(RPFP::Node *to){
|
||||
to->Annotation = to->map->Annotation;
|
||||
#ifndef WITH_CHILDREN
|
||||
tree->CreateLowerBoundEdge(to);
|
||||
#endif
|
||||
leaves.push_back(to);
|
||||
}
|
||||
|
||||
Node *CreateApproximatedInstance(RPFP::Node *from){
|
||||
Node *to = tree->CloneNode(from);
|
||||
InitializeApproximatedInstance(to);
|
||||
return to;
|
||||
}
|
||||
|
||||
|
@ -1491,7 +1660,7 @@ namespace Duality {
|
|||
return res != unsat;
|
||||
}
|
||||
|
||||
bool Build(){
|
||||
virtual bool Build(){
|
||||
#ifdef EFFORT_BOUNDED_STRAT
|
||||
start_decs = tree->CumulativeDecisions();
|
||||
#endif
|
||||
|
@ -1545,15 +1714,25 @@ namespace Duality {
|
|||
}
|
||||
}
|
||||
|
||||
void ExpandNode(RPFP::Node *p){
|
||||
virtual void ExpandNode(RPFP::Node *p){
|
||||
// tree->RemoveEdge(p->Outgoing);
|
||||
Edge *edge = duality->GetNodeOutgoing(p->map,last_decs);
|
||||
std::vector<RPFP::Node *> &cs = edge->Children;
|
||||
std::vector<RPFP::Node *> children(cs.size());
|
||||
for(unsigned i = 0; i < cs.size(); i++)
|
||||
children[i] = CreateApproximatedInstance(cs[i]);
|
||||
Edge *ne = tree->CreateEdge(p, p->map->Outgoing->F, children);
|
||||
ne->map = p->map->Outgoing->map;
|
||||
Edge *ne = p->Outgoing;
|
||||
if(ne) {
|
||||
// reporter->Message("Recycling edge...");
|
||||
std::vector<RPFP::Node *> &cs = ne->Children;
|
||||
for(unsigned i = 0; i < cs.size(); i++)
|
||||
InitializeApproximatedInstance(cs[i]);
|
||||
// ne->dual = expr();
|
||||
}
|
||||
else {
|
||||
Edge *edge = duality->GetNodeOutgoing(p->map,last_decs);
|
||||
std::vector<RPFP::Node *> &cs = edge->Children;
|
||||
std::vector<RPFP::Node *> children(cs.size());
|
||||
for(unsigned i = 0; i < cs.size(); i++)
|
||||
children[i] = CreateApproximatedInstance(cs[i]);
|
||||
ne = tree->CreateEdge(p, p->map->Outgoing->F, children);
|
||||
ne->map = p->map->Outgoing->map;
|
||||
}
|
||||
#ifndef WITH_CHILDREN
|
||||
tree->AssertEdge(ne); // assert the edge in the solver
|
||||
#else
|
||||
|
@ -1573,6 +1752,7 @@ namespace Duality {
|
|||
}
|
||||
#else
|
||||
#if 0
|
||||
|
||||
void ExpansionChoices(std::set<Node *> &best){
|
||||
std::vector <Node *> unused_set, used_set;
|
||||
std::set<Node *> choices;
|
||||
|
@ -1598,12 +1778,12 @@ namespace Duality {
|
|||
heuristic->ChooseExpand(choices, best);
|
||||
}
|
||||
#else
|
||||
void ExpansionChoicesFull(std::set<Node *> &best, bool high_priority){
|
||||
void ExpansionChoicesFull(std::set<Node *> &best, bool high_priority, bool best_only = false){
|
||||
std::set<Node *> choices;
|
||||
for(std::list<RPFP::Node *>::iterator it = leaves.begin(), en = leaves.end(); it != en; ++it)
|
||||
if (high_priority || !tree->Empty(*it)) // if used in the counter-model
|
||||
choices.insert(*it);
|
||||
heuristic->ChooseExpand(choices, best, high_priority);
|
||||
heuristic->ChooseExpand(choices, best, high_priority, best_only);
|
||||
}
|
||||
|
||||
void ExpansionChoicesRec(std::vector <Node *> &unused_set, std::vector <Node *> &used_set,
|
||||
|
@ -1641,9 +1821,9 @@ namespace Duality {
|
|||
|
||||
std::set<Node *> old_choices;
|
||||
|
||||
void ExpansionChoices(std::set<Node *> &best, bool high_priority){
|
||||
void ExpansionChoices(std::set<Node *> &best, bool high_priority, bool best_only = false){
|
||||
if(!underapprox || constrained || high_priority){
|
||||
ExpansionChoicesFull(best, high_priority);
|
||||
ExpansionChoicesFull(best, high_priority,best_only);
|
||||
return;
|
||||
}
|
||||
std::vector <Node *> unused_set, used_set;
|
||||
|
@ -1668,28 +1848,341 @@ namespace Duality {
|
|||
#endif
|
||||
#endif
|
||||
|
||||
bool ExpandSomeNodes(bool high_priority = false){
|
||||
bool ExpandSomeNodes(bool high_priority = false, int max = INT_MAX){
|
||||
#ifdef EFFORT_BOUNDED_STRAT
|
||||
last_decs = tree->CumulativeDecisions() - start_decs;
|
||||
#endif
|
||||
timer_start("ExpandSomeNodes");
|
||||
timer_start("ExpansionChoices");
|
||||
std::set<Node *> choices;
|
||||
ExpansionChoices(choices,high_priority);
|
||||
ExpansionChoices(choices,high_priority,max != INT_MAX);
|
||||
timer_stop("ExpansionChoices");
|
||||
std::list<RPFP::Node *> leaves_copy = leaves; // copy so can modify orig
|
||||
leaves.clear();
|
||||
int count = 0;
|
||||
for(std::list<RPFP::Node *>::iterator it = leaves_copy.begin(), en = leaves_copy.end(); it != en; ++it){
|
||||
if(choices.find(*it) != choices.end())
|
||||
if(choices.find(*it) != choices.end() && count < max){
|
||||
count++;
|
||||
ExpandNode(*it);
|
||||
}
|
||||
else leaves.push_back(*it);
|
||||
}
|
||||
timer_stop("ExpandSomeNodes");
|
||||
return !choices.empty();
|
||||
}
|
||||
|
||||
void RemoveExpansion(RPFP::Node *p){
|
||||
Edge *edge = p->Outgoing;
|
||||
Node *parent = edge->Parent;
|
||||
#ifndef KEEP_EXPANSIONS
|
||||
std::vector<RPFP::Node *> cs = edge->Children;
|
||||
tree->DeleteEdge(edge);
|
||||
for(unsigned i = 0; i < cs.size(); i++)
|
||||
tree->DeleteNode(cs[i]);
|
||||
#endif
|
||||
leaves.push_back(parent);
|
||||
}
|
||||
|
||||
// remove all the descendants of tree root (but not root itself)
|
||||
void RemoveTree(RPFP *tree, RPFP::Node *root){
|
||||
Edge *edge = root->Outgoing;
|
||||
std::vector<RPFP::Node *> cs = edge->Children;
|
||||
tree->DeleteEdge(edge);
|
||||
for(unsigned i = 0; i < cs.size(); i++){
|
||||
RemoveTree(tree,cs[i]);
|
||||
tree->DeleteNode(cs[i]);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class DerivationTreeSlow : public DerivationTree {
|
||||
public:
|
||||
|
||||
struct stack_entry {
|
||||
unsigned level; // SMT solver stack level
|
||||
std::vector<Node *> expansions;
|
||||
};
|
||||
|
||||
std::vector<stack_entry> stack;
|
||||
|
||||
hash_map<Node *, expr> updates;
|
||||
|
||||
DerivationTreeSlow(Duality *_duality, RPFP *rpfp, Reporter *_reporter, Heuristic *_heuristic, bool _full_expand)
|
||||
: DerivationTree(_duality, rpfp, _reporter, _heuristic, _full_expand) {
|
||||
stack.push_back(stack_entry());
|
||||
}
|
||||
|
||||
virtual bool Build(){
|
||||
|
||||
stack.back().level = tree->slvr().get_scope_level();
|
||||
bool was_sat = true;
|
||||
|
||||
while (true)
|
||||
{
|
||||
lbool res;
|
||||
|
||||
unsigned slvr_level = tree->slvr().get_scope_level();
|
||||
if(slvr_level != stack.back().level)
|
||||
throw "stacks out of sync!";
|
||||
|
||||
// res = tree->Solve(top, 1); // incremental solve, keep interpolants for one pop
|
||||
check_result foo = tree->Check(top);
|
||||
res = foo == unsat ? l_false : l_true;
|
||||
|
||||
if (res == l_false) {
|
||||
if (stack.empty()) // should never happen
|
||||
return false;
|
||||
|
||||
{
|
||||
std::vector<Node *> &expansions = stack.back().expansions;
|
||||
int update_count = 0;
|
||||
for(unsigned i = 0; i < expansions.size(); i++){
|
||||
Node *node = expansions[i];
|
||||
tree->SolveSingleNode(top,node);
|
||||
#ifdef NO_GENERALIZE
|
||||
node->Annotation.Formula = tree->RemoveRedundancy(node->Annotation.Formula).simplify();
|
||||
#else
|
||||
if(expansions.size() == 1 && NodeTooComplicated(node))
|
||||
SimplifyNode(node);
|
||||
else
|
||||
node->Annotation.Formula = tree->RemoveRedundancy(node->Annotation.Formula).simplify();
|
||||
Generalize(node);
|
||||
#endif
|
||||
if(RecordUpdate(node))
|
||||
update_count++;
|
||||
else
|
||||
heuristic->Update(node->map); // make it less likely to expand this node in future
|
||||
}
|
||||
if(update_count == 0){
|
||||
if(was_sat)
|
||||
throw Incompleteness();
|
||||
reporter->Message("backtracked without learning");
|
||||
}
|
||||
}
|
||||
tree->ComputeProofCore(); // need to compute the proof core before popping solver
|
||||
bool propagated = false;
|
||||
while(1) {
|
||||
std::vector<Node *> &expansions = stack.back().expansions;
|
||||
bool prev_level_used = LevelUsedInProof(stack.size()-2); // need to compute this before pop
|
||||
tree->Pop(1);
|
||||
hash_set<Node *> leaves_to_remove;
|
||||
for(unsigned i = 0; i < expansions.size(); i++){
|
||||
Node *node = expansions[i];
|
||||
// if(node != top)
|
||||
// tree->ConstrainParent(node->Incoming[0],node);
|
||||
std::vector<Node *> &cs = node->Outgoing->Children;
|
||||
for(unsigned i = 0; i < cs.size(); i++){
|
||||
leaves_to_remove.insert(cs[i]);
|
||||
UnmapNode(cs[i]);
|
||||
if(std::find(updated_nodes.begin(),updated_nodes.end(),cs[i]) != updated_nodes.end())
|
||||
throw "help!";
|
||||
}
|
||||
}
|
||||
RemoveLeaves(leaves_to_remove); // have to do this before actually deleting the children
|
||||
for(unsigned i = 0; i < expansions.size(); i++){
|
||||
Node *node = expansions[i];
|
||||
RemoveExpansion(node);
|
||||
}
|
||||
stack.pop_back();
|
||||
if(stack.size() == 1)break;
|
||||
if(prev_level_used){
|
||||
Node *node = stack.back().expansions[0];
|
||||
#ifndef NO_PROPAGATE
|
||||
if(!Propagate(node)) break;
|
||||
#endif
|
||||
if(!RecordUpdate(node)) break; // shouldn't happen!
|
||||
RemoveUpdateNodesAtCurrentLevel(); // this level is about to be deleted -- remove its children from update list
|
||||
propagated = true;
|
||||
continue;
|
||||
}
|
||||
if(propagated) break; // propagation invalidates the proof core, so disable non-chron backtrack
|
||||
RemoveUpdateNodesAtCurrentLevel(); // this level is about to be deleted -- remove its children from update list
|
||||
std::vector<Node *> &unused_ex = stack.back().expansions;
|
||||
for(unsigned i = 0; i < unused_ex.size(); i++)
|
||||
heuristic->Update(unused_ex[i]->map); // make it less likely to expand this node in future
|
||||
}
|
||||
HandleUpdatedNodes();
|
||||
if(stack.size() == 1){
|
||||
if(top->Outgoing)
|
||||
tree->DeleteEdge(top->Outgoing); // in case we kept the tree
|
||||
return false;
|
||||
}
|
||||
was_sat = false;
|
||||
}
|
||||
else {
|
||||
was_sat = true;
|
||||
tree->Push();
|
||||
std::vector<Node *> &expansions = stack.back().expansions;
|
||||
#ifndef NO_DECISIONS
|
||||
for(unsigned i = 0; i < expansions.size(); i++){
|
||||
tree->FixCurrentState(expansions[i]->Outgoing);
|
||||
}
|
||||
#endif
|
||||
#if 0
|
||||
if(tree->slvr().check() == unsat)
|
||||
throw "help!";
|
||||
#endif
|
||||
stack.push_back(stack_entry());
|
||||
stack.back().level = tree->slvr().get_scope_level();
|
||||
if(ExpandSomeNodes(false,1)){
|
||||
continue;
|
||||
}
|
||||
while(stack.size() > 1){
|
||||
tree->Pop(1);
|
||||
stack.pop_back();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
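An illustrative sketch (hypothetical helper names, not the commit's code) of the invariant Build() checks above: each expansion pushes one SMT solver scope and one bookkeeping entry, and backtracking pops both together, so the solver's scope level and the expansion stack must always agree.

#include <stdexcept>
#include <vector>

struct SolverStub {                       // stand-in for the tree solver's scopes
    unsigned level = 0;
    void push() { ++level; }
    void pop(unsigned n = 1) { level -= n; }
    unsigned get_scope_level() const { return level; }
};

struct StackEntry { unsigned level; };    // mirrors DerivationTreeSlow::stack_entry

void expand_step(SolverStub &slvr, std::vector<StackEntry> &stack) {
    slvr.push();                                          // one solver scope per expansion step
    stack.push_back(StackEntry{slvr.get_scope_level()});
}

void backtrack_step(SolverStub &slvr, std::vector<StackEntry> &stack) {
    if (slvr.get_scope_level() != stack.back().level)     // the same equality Build() asserts
        throw std::runtime_error("stacks out of sync!");
    slvr.pop(1);
    stack.pop_back();
}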
|
||||
|
||||
bool NodeTooComplicated(Node *node){
|
||||
int ops = tree->CountOperators(node->Annotation.Formula);
|
||||
if(ops > 10) return true;
|
||||
node->Annotation.Formula = tree->RemoveRedundancy(node->Annotation.Formula).simplify();
|
||||
return tree->CountOperators(node->Annotation.Formula) > 3;
|
||||
}
|
||||
|
||||
void SimplifyNode(Node *node){
|
||||
// have to destroy the old proof to get a new interpolant
|
||||
timer_start("SimplifyNode");
|
||||
tree->PopPush();
|
||||
tree->InterpolateByCases(top,node);
|
||||
timer_stop("SimplifyNode");
|
||||
}
|
||||
|
||||
bool LevelUsedInProof(unsigned level){
|
||||
std::vector<Node *> &expansions = stack[level].expansions;
|
||||
for(unsigned i = 0; i < expansions.size(); i++)
|
||||
if(tree->EdgeUsedInProof(expansions[i]->Outgoing))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
void RemoveUpdateNodesAtCurrentLevel() {
|
||||
for(std::list<Node *>::iterator it = updated_nodes.begin(), en = updated_nodes.end(); it != en;){
|
||||
Node *node = *it;
|
||||
if(AtCurrentStackLevel(node->Incoming[0]->Parent)){
|
||||
std::list<Node *>::iterator victim = it;
|
||||
++it;
|
||||
updated_nodes.erase(victim);
|
||||
}
|
||||
else
|
||||
++it;
|
||||
}
|
||||
}
|
||||
|
||||
void RemoveLeaves(hash_set<Node *> &leaves_to_remove){
|
||||
std::list<RPFP::Node *> leaves_copy;
|
||||
leaves_copy.swap(leaves);
|
||||
for(std::list<RPFP::Node *>::iterator it = leaves_copy.begin(), en = leaves_copy.end(); it != en; ++it){
|
||||
if(leaves_to_remove.find(*it) == leaves_to_remove.end())
|
||||
leaves.push_back(*it);
|
||||
}
|
||||
}
|
||||
|
||||
hash_map<Node *, std::vector<Node *> > node_map;
|
||||
std::list<Node *> updated_nodes;
|
||||
|
||||
virtual void ExpandNode(RPFP::Node *p){
|
||||
stack.back().expansions.push_back(p);
|
||||
DerivationTree::ExpandNode(p);
|
||||
std::vector<Node *> &new_nodes = p->Outgoing->Children;
|
||||
for(unsigned i = 0; i < new_nodes.size(); i++){
|
||||
Node *n = new_nodes[i];
|
||||
node_map[n->map].push_back(n);
|
||||
}
|
||||
}
|
||||
|
||||
bool RecordUpdate(Node *node){
|
||||
bool res = duality->UpdateNodeToNode(node->map,node);
|
||||
if(res){
|
||||
std::vector<Node *> to_update = node_map[node->map];
|
||||
for(unsigned i = 0; i < to_update.size(); i++){
|
||||
Node *node2 = to_update[i];
|
||||
// maintain invariant that no nodes on updated list are created at current stack level
|
||||
if(node2 == node || !(node->Incoming.size() > 0 && AtCurrentStackLevel(node2->Incoming[0]->Parent))){
|
||||
updated_nodes.push_back(node2);
|
||||
if(node2 != node)
|
||||
node2->Annotation = node->Annotation;
|
||||
}
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
void HandleUpdatedNodes(){
|
||||
for(std::list<Node *>::iterator it = updated_nodes.begin(), en = updated_nodes.end(); it != en;){
|
||||
Node *node = *it;
|
||||
node->Annotation = node->map->Annotation;
|
||||
if(node->Incoming.size() > 0)
|
||||
tree->ConstrainParent(node->Incoming[0],node);
|
||||
if(AtCurrentStackLevel(node->Incoming[0]->Parent)){
|
||||
std::list<Node *>::iterator victim = it;
|
||||
++it;
|
||||
updated_nodes.erase(victim);
|
||||
}
|
||||
else
|
||||
++it;
|
||||
}
|
||||
}
|
||||
|
||||
bool AtCurrentStackLevel(Node *node){
|
||||
std::vector<Node *> vec = stack.back().expansions;
|
||||
for(unsigned i = 0; i < vec.size(); i++)
|
||||
if(vec[i] == node)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
void UnmapNode(Node *node){
|
||||
std::vector<Node *> &vec = node_map[node->map];
|
||||
for(unsigned i = 0; i < vec.size(); i++){
|
||||
if(vec[i] == node){
|
||||
std::swap(vec[i],vec.back());
|
||||
vec.pop_back();
|
||||
return;
|
||||
}
|
||||
}
|
||||
throw "can't unmap node";
|
||||
}
|
||||
|
||||
void Generalize(Node *node){
|
||||
#ifndef USE_RPFP_CLONE
|
||||
tree->Generalize(top,node);
|
||||
#else
|
||||
RPFP_caching *clone_rpfp = duality->clone_rpfp;
|
||||
if(!node->Outgoing->map) return;
|
||||
Edge *clone_edge = clone_rpfp->GetEdgeClone(node->Outgoing->map);
|
||||
Node *clone_node = clone_edge->Parent;
|
||||
clone_node->Annotation = node->Annotation;
|
||||
for(unsigned i = 0; i < clone_edge->Children.size(); i++)
|
||||
clone_edge->Children[i]->Annotation = node->map->Outgoing->Children[i]->Annotation;
|
||||
clone_rpfp->GeneralizeCache(clone_edge);
|
||||
node->Annotation = clone_node->Annotation;
|
||||
#endif
|
||||
}
|
||||
|
||||
bool Propagate(Node *node){
|
||||
#ifdef USE_RPFP_CLONE
|
||||
RPFP_caching *clone_rpfp = duality->clone_rpfp;
|
||||
Edge *clone_edge = clone_rpfp->GetEdgeClone(node->Outgoing->map);
|
||||
Node *clone_node = clone_edge->Parent;
|
||||
clone_node->Annotation = node->map->Annotation;
|
||||
for(unsigned i = 0; i < clone_edge->Children.size(); i++)
|
||||
clone_edge->Children[i]->Annotation = node->map->Outgoing->Children[i]->Annotation;
|
||||
bool res = clone_rpfp->PropagateCache(clone_edge);
|
||||
if(res)
|
||||
node->Annotation = clone_node->Annotation;
|
||||
return res;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
class Covering {
|
||||
|
||||
struct cover_info {
|
||||
|
@ -1708,6 +2201,11 @@ namespace Duality {
|
|||
Duality *parent;
|
||||
bool some_updates;
|
||||
|
||||
#define NO_CONJ_ON_SIMPLE_LOOPS
|
||||
#ifdef NO_CONJ_ON_SIMPLE_LOOPS
|
||||
hash_set<Node *> simple_loops;
|
||||
#endif
|
||||
|
||||
Node *&covered_by(Node *node){
|
||||
return cm[node].covered_by;
|
||||
}
|
||||
|
@ -1742,6 +2240,24 @@ namespace Duality {
|
|||
Covering(Duality *_parent){
|
||||
parent = _parent;
|
||||
some_updates = false;
|
||||
|
||||
#ifdef NO_CONJ_ON_SIMPLE_LOOPS
|
||||
hash_map<Node *,std::vector<Edge *> > outgoing;
|
||||
for(unsigned i = 0; i < parent->rpfp->edges.size(); i++)
|
||||
outgoing[parent->rpfp->edges[i]->Parent].push_back(parent->rpfp->edges[i]);
|
||||
for(unsigned i = 0; i < parent->rpfp->nodes.size(); i++){
|
||||
Node * node = parent->rpfp->nodes[i];
|
||||
std::vector<Edge *> &outs = outgoing[node];
|
||||
if(outs.size() == 2){
|
||||
for(int j = 0; j < 2; j++){
|
||||
Edge *loop_edge = outs[j];
|
||||
if(loop_edge->Children.size() == 1 && loop_edge->Children[0] == loop_edge->Parent)
|
||||
simple_loops.insert(node);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
bool IsCoveredRec(hash_set<Node *> &memo, Node *node){
|
||||
|
@ -1904,6 +2420,11 @@ namespace Duality {
|
|||
}
|
||||
|
||||
bool CouldCover(Node *covered, Node *covering){
|
||||
#ifdef NO_CONJ_ON_SIMPLE_LOOPS
|
||||
// For simple loops, we rely on propagation, not covering
|
||||
if(simple_loops.find(covered->map) != simple_loops.end())
|
||||
return false;
|
||||
#endif
|
||||
#ifdef UNDERAPPROX_NODES
|
||||
// if(parent->underapprox_map.find(covering) != parent->underapprox_map.end())
|
||||
// return parent->underapprox_map[covering] == covered;
|
||||
|
@ -2084,7 +2605,7 @@ namespace Duality {
|
|||
return name;
|
||||
}
|
||||
|
||||
virtual void ChooseExpand(const std::set<RPFP::Node *> &choices, std::set<RPFP::Node *> &best, bool high_priority){
|
||||
virtual void ChooseExpand(const std::set<RPFP::Node *> &choices, std::set<RPFP::Node *> &best, bool high_priority, bool best_only){
|
||||
if(!high_priority || !old_cex.tree){
|
||||
Heuristic::ChooseExpand(choices,best,false);
|
||||
return;
|
||||
|
|
137
src/duality/duality_wrapper.cpp
Normal file → Executable file
|
@ -18,6 +18,13 @@ Revision History:
|
|||
|
||||
--*/
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable:4996)
|
||||
#pragma warning(disable:4800)
|
||||
#pragma warning(disable:4267)
|
||||
#pragma warning(disable:4101)
|
||||
#endif
|
||||
|
||||
#include "duality_wrapper.h"
|
||||
#include <iostream>
|
||||
#include "smt_solver.h"
|
||||
|
@ -26,17 +33,26 @@ Revision History:
|
|||
#include "expr_abstract.h"
|
||||
#include "stopwatch.h"
|
||||
#include "model_smt2_pp.h"
|
||||
#include "qe_lite.h"
|
||||
|
||||
namespace Duality {

  solver::solver(Duality::context& c) : object(c), the_model(c) {
  solver::solver(Duality::context& c, bool extensional, bool models) : object(c), the_model(c) {
    params_ref p;
    p.set_bool("proof", true); // this is currently useless
    p.set_bool("model", true);
    if(models)
      p.set_bool("model", true);
    p.set_bool("unsat_core", true);
    p.set_bool("mbqi",true);
    p.set_str("mbqi.id","itp"); // use mbqi for quantifiers in interpolants
    p.set_uint("mbqi.max_iterations",1); // use mbqi for quantifiers in interpolants
    if(true || extensional)
      p.set_bool("array.extensional",true);
    scoped_ptr<solver_factory> sf = mk_smt_solver_factory();
    m_solver = (*sf)(m(), p, true, true, true, ::symbol::null);
    m_solver->updt_params(p); // why do we have to do this?
    canceled = false;
    m_mode = m().proof_mode();
  }

expr context::constant(const std::string &name, const sort &ty){
|
||||
|
@ -323,6 +339,25 @@ expr context::make_quant(decl_kind op, const std::vector<sort> &_sorts, const st
|
|||
return simplify(p);
|
||||
}
|
||||
|
||||
expr expr::qe_lite() const {
|
||||
::qe_lite qe(m());
|
||||
expr_ref result(to_expr(raw()),m());
|
||||
proof_ref pf(m());
|
||||
qe(result,pf);
|
||||
return ctx().cook(result);
|
||||
}
|
||||
|
||||
expr expr::qe_lite(const std::set<int> &idxs, bool index_of_bound) const {
|
||||
::qe_lite qe(m());
|
||||
expr_ref result(to_expr(raw()),m());
|
||||
proof_ref pf(m());
|
||||
uint_set uis;
|
||||
for(std::set<int>::const_iterator it=idxs.begin(), en = idxs.end(); it != en; ++it)
|
||||
uis.insert(*it);
|
||||
qe(uis,index_of_bound,result);
|
||||
return ctx().cook(result);
|
||||
}
|
||||
|
||||
expr clone_quantifier(const expr &q, const expr &b){
|
||||
return q.ctx().cook(q.m().update_quantifier(to_quantifier(q.raw()), to_expr(b.raw())));
|
||||
}
|
||||
|
@ -347,6 +382,18 @@ expr context::make_quant(decl_kind op, const std::vector<sort> &_sorts, const st
|
|||
}
|
||||
|
||||
|
||||
unsigned func_decl::arity() const {
|
||||
return (to_func_decl(raw())->get_arity());
|
||||
}
|
||||
|
||||
sort func_decl::domain(unsigned i) const {
|
||||
return sort(ctx(),(to_func_decl(raw())->get_domain(i)));
|
||||
}
|
||||
|
||||
sort func_decl::range() const {
|
||||
return sort(ctx(),(to_func_decl(raw())->get_range()));
|
||||
}
|
||||
|
||||
func_decl context::fresh_func_decl(char const * prefix, const std::vector<sort> &domain, sort const & range){
|
||||
std::vector < ::sort * > _domain(domain.size());
|
||||
for(unsigned i = 0; i < domain.size(); i++)
|
||||
|
@ -425,15 +472,18 @@ expr context::make_quant(decl_kind op, const std::vector<sort> &_sorts, const st
|
|||
|
||||
static int linearize_assumptions(int num,
|
||||
TermTree *assumptions,
|
||||
std::vector<expr> &linear_assumptions,
|
||||
std::vector<std::vector <expr> > &linear_assumptions,
|
||||
std::vector<int> &parents){
|
||||
for(unsigned i = 0; i < assumptions->getChildren().size(); i++)
|
||||
num = linearize_assumptions(num, assumptions->getChildren()[i], linear_assumptions, parents);
|
||||
linear_assumptions[num] = assumptions->getTerm();
|
||||
// linear_assumptions[num].push_back(assumptions->getTerm());
|
||||
for(unsigned i = 0; i < assumptions->getChildren().size(); i++)
|
||||
parents[assumptions->getChildren()[i]->getNumber()] = num;
|
||||
parents[num] = SHRT_MAX; // in case we have no parent
|
||||
linear_assumptions[num] = assumptions->getTerm();
|
||||
linear_assumptions[num].push_back(assumptions->getTerm());
|
||||
std::vector<expr> &ts = assumptions->getTerms();
|
||||
for(unsigned i = 0; i < ts.size(); i++)
|
||||
linear_assumptions[num].push_back(ts[i]);
|
||||
return num + 1;
|
||||
}
|
||||
|
||||
|
@ -462,14 +512,15 @@ expr context::make_quant(decl_kind op, const std::vector<sort> &_sorts, const st
|
|||
|
||||
{
|
||||
int size = assumptions->number(0);
|
||||
std::vector<expr> linear_assumptions(size);
|
||||
std::vector<std::vector<expr> > linear_assumptions(size);
|
||||
std::vector<int> parents(size);
|
||||
linearize_assumptions(0,assumptions,linear_assumptions,parents);
|
||||
|
||||
ptr_vector< ::ast> _interpolants(size-1);
|
||||
ptr_vector< ::ast>_assumptions(size);
|
||||
vector<ptr_vector< ::ast> >_assumptions(size);
|
||||
for(int i = 0; i < size; i++)
|
||||
_assumptions[i] = linear_assumptions[i];
|
||||
for(unsigned j = 0; j < linear_assumptions[i].size(); j++)
|
||||
_assumptions[i].push_back(linear_assumptions[i][j]);
|
||||
::vector<int> _parents; _parents.resize(parents.size());
|
||||
for(unsigned i = 0; i < parents.size(); i++)
|
||||
_parents[i] = parents[i];
|
||||
|
@ -477,14 +528,18 @@ expr context::make_quant(decl_kind op, const std::vector<sort> &_sorts, const st
|
|||
for(unsigned i = 0; i < theory.size(); i++)
|
||||
_theory[i] = theory[i];
|
||||
|
||||
push();
|
||||
|
||||
if(!incremental){
|
||||
push();
|
||||
for(unsigned i = 0; i < linear_assumptions.size(); i++)
|
||||
add(linear_assumptions[i]);
|
||||
for(unsigned j = 0; j < linear_assumptions[i].size(); j++)
|
||||
add(linear_assumptions[i][j]);
|
||||
}
|
||||
|
||||
check_result res = check();
|
||||
check_result res = unsat;
|
||||
|
||||
if(!m_solver->get_proof())
|
||||
res = check();
|
||||
|
||||
if(res == unsat){
|
||||
|
||||
|
@ -517,7 +572,8 @@ expr context::make_quant(decl_kind op, const std::vector<sort> &_sorts, const st
|
|||
}
|
||||
#endif
|
||||
|
||||
pop();
|
||||
if(!incremental)
|
||||
pop();
|
||||
|
||||
return (res == unsat) ? l_false : ((res == sat) ? l_true : l_undef);
|
||||
|
||||
|
@ -549,6 +605,29 @@ expr context::make_quant(decl_kind op, const std::vector<sort> &_sorts, const st
|
|||
return "";
|
||||
}
|
||||
|
||||
|
||||
static void get_assumptions_rec(stl_ext::hash_set<ast> &memo, const proof &pf, std::vector<expr> &assumps){
|
||||
if(memo.find(pf) != memo.end())return;
|
||||
memo.insert(pf);
|
||||
pfrule dk = pf.rule();
|
||||
if(dk == PR_ASSERTED){
|
||||
expr con = pf.conc();
|
||||
assumps.push_back(con);
|
||||
}
|
||||
else {
|
||||
unsigned nprems = pf.num_prems();
|
||||
for(unsigned i = 0; i < nprems; i++){
|
||||
proof arg = pf.prem(i);
|
||||
get_assumptions_rec(memo,arg,assumps);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void proof::get_assumptions(std::vector<expr> &assumps){
|
||||
stl_ext::hash_set<ast> memo;
|
||||
get_assumptions_rec(memo,*this,assumps);
|
||||
}
|
||||
|
||||
|
||||
void ast::show() const{
|
||||
std::cout << mk_pp(raw(), m()) << std::endl;
|
||||
|
@ -559,6 +638,40 @@ expr context::make_quant(decl_kind op, const std::vector<sort> &_sorts, const st
|
|||
std::cout << std::endl;
|
||||
}
|
||||
|
||||
void model::show_hash() const {
|
||||
std::ostringstream ss;
|
||||
model_smt2_pp(ss, m(), *m_model, 0);
|
||||
hash_space::hash<std::string> hasher;
|
||||
unsigned h = hasher(ss.str());
|
||||
std::cout << "model hash: " << h << "\n";
|
||||
}
|
||||
|
||||
void solver::show() {
|
||||
unsigned n = m_solver->get_num_assertions();
|
||||
if(!n)
|
||||
return;
|
||||
ast_smt_pp pp(m());
|
||||
for (unsigned i = 0; i < n-1; ++i)
|
||||
pp.add_assumption(m_solver->get_assertion(i));
|
||||
pp.display_smt2(std::cout, m_solver->get_assertion(n-1));
|
||||
}
|
||||
|
||||
void solver::show_assertion_ids() {
|
||||
#if 0
|
||||
unsigned n = m_solver->get_num_assertions();
|
||||
std::cerr << "assertion ids: ";
|
||||
for (unsigned i = 0; i < n-1; ++i)
|
||||
std::cerr << " " << m_solver->get_assertion(i)->get_id();
|
||||
std::cerr << "\n";
|
||||
#else
|
||||
unsigned n = m_solver->get_num_assertions();
|
||||
std::cerr << "assertion ids hash: ";
|
||||
unsigned h = 0;
|
||||
for (unsigned i = 0; i < n-1; ++i)
|
||||
h += m_solver->get_assertion(i)->get_id();
|
||||
std::cerr << h << "\n";
|
||||
#endif
|
||||
}
|
||||
|
||||
void include_ast_show(ast &a){
|
||||
a.show();
|
||||
|
|
|
@ -26,6 +26,7 @@ Revision History:
|
|||
#include<sstream>
|
||||
#include<vector>
|
||||
#include<list>
|
||||
#include <set>
|
||||
#include"version.h"
|
||||
#include<limits.h>
|
||||
|
||||
|
@ -50,6 +51,7 @@ Revision History:
|
|||
#include"scoped_ctrl_c.h"
|
||||
#include"cancel_eh.h"
|
||||
#include"scoped_timer.h"
|
||||
#include"scoped_proof.h"
|
||||
|
||||
namespace Duality {
|
||||
|
||||
|
@ -393,6 +395,7 @@ namespace Duality {
|
|||
sort array_range() const;
|
||||
};
|
||||
|
||||
|
||||
class func_decl : public ast {
|
||||
public:
|
||||
func_decl() : ast() {}
|
||||
|
@ -412,6 +415,7 @@ namespace Duality {
|
|||
|
||||
expr operator()(unsigned n, expr const * args) const;
|
||||
expr operator()(const std::vector<expr> &args) const;
|
||||
expr operator()() const;
|
||||
expr operator()(expr const & a) const;
|
||||
expr operator()(int a) const;
|
||||
expr operator()(expr const & a1, expr const & a2) const;
|
||||
|
@ -447,6 +451,7 @@ namespace Duality {
|
|||
bool is_datatype() const { return get_sort().is_datatype(); }
|
||||
bool is_relation() const { return get_sort().is_relation(); }
|
||||
bool is_finite_domain() const { return get_sort().is_finite_domain(); }
|
||||
bool is_true() const {return is_app() && decl().get_decl_kind() == True; }
|
||||
|
||||
bool is_numeral() const {
|
||||
return is_app() && decl().get_decl_kind() == OtherArith && m().is_unique_value(to_expr(raw()));
|
||||
|
@ -455,6 +460,8 @@ namespace Duality {
|
|||
bool is_quantifier() const {return raw()->get_kind() == AST_QUANTIFIER;}
|
||||
bool is_var() const {return raw()->get_kind() == AST_VAR;}
|
||||
bool is_label (bool &pos,std::vector<symbol> &names) const ;
|
||||
bool is_ground() const {return to_app(raw())->is_ground();}
|
||||
bool has_quantifiers() const {return to_app(raw())->has_quantifiers();}
|
||||
|
||||
// operator Z3_app() const { assert(is_app()); return reinterpret_cast<Z3_app>(m_ast); }
|
||||
func_decl decl() const {return func_decl(ctx(),to_app(raw())->get_decl());}
|
||||
|
@ -554,6 +561,10 @@ namespace Duality {
|
|||
|
||||
expr simplify(params const & p) const;
|
||||
|
||||
expr qe_lite() const;
|
||||
|
||||
expr qe_lite(const std::set<int> &idxs, bool index_of_bound) const;
|
||||
|
||||
friend expr clone_quantifier(const expr &, const expr &);
|
||||
|
||||
friend expr clone_quantifier(const expr &q, const expr &b, const std::vector<expr> &patterns);
|
||||
|
@ -593,6 +604,36 @@ namespace Duality {
|
|||
};
|
||||
|
||||
|
||||
typedef ::decl_kind pfrule;
|
||||
|
||||
class proof : public ast {
|
||||
public:
|
||||
proof(context & c):ast(c) {}
|
||||
proof(context & c, ::proof *s):ast(c, s) {}
|
||||
proof(proof const & s):ast(s) {}
|
||||
operator ::proof*() const { return to_app(raw()); }
|
||||
proof & operator=(proof const & s) { return static_cast<proof&>(ast::operator=(s)); }
|
||||
|
||||
pfrule rule() const {
|
||||
::func_decl *d = to_app(raw())->get_decl();
|
||||
return d->get_decl_kind();
|
||||
}
|
||||
|
||||
unsigned num_prems() const {
|
||||
return to_app(raw())->get_num_args() - 1;
|
||||
}
|
||||
|
||||
expr conc() const {
|
||||
return ctx().cook(to_app(raw())->get_arg(num_prems()));
|
||||
}
|
||||
|
||||
proof prem(unsigned i) const {
|
||||
return proof(ctx(),to_app(to_app(raw())->get_arg(i)));
|
||||
}
|
||||
|
||||
void get_assumptions(std::vector<expr> &assumps);
|
||||
};
|
||||
|
||||
#if 0
|
||||
|
||||
#if Z3_MAJOR_VERSION > 4 || Z3_MAJOR_VERSION == 4 && Z3_MINOR_VERSION >= 3
|
||||
|
@ -682,6 +723,7 @@ namespace Duality {
|
|||
m_model = s;
|
||||
return *this;
|
||||
}
|
||||
bool null() const {return !m_model;}
|
||||
|
||||
expr eval(expr const & n, bool model_completion=true) const {
|
||||
::model * _m = m_model.get();
|
||||
|
@ -691,6 +733,7 @@ namespace Duality {
|
|||
}
|
||||
|
||||
void show() const;
|
||||
void show_hash() const;
|
||||
|
||||
unsigned num_consts() const {return m_model.get()->get_num_constants();}
|
||||
unsigned num_funcs() const {return m_model.get()->get_num_functions();}
|
||||
|
@ -774,8 +817,9 @@ namespace Duality {
|
|||
::solver *m_solver;
|
||||
model the_model;
|
||||
bool canceled;
|
||||
proof_gen_mode m_mode;
|
||||
public:
|
||||
solver(context & c);
|
||||
solver(context & c, bool extensional = false, bool models = true);
|
||||
solver(context & c, ::solver *s):object(c),the_model(c) { m_solver = s; canceled = false;}
|
||||
solver(solver const & s):object(s), the_model(s.the_model) { m_solver = s.m_solver; canceled = false;}
|
||||
~solver() {
|
||||
|
@ -787,6 +831,7 @@ namespace Duality {
|
|||
m_ctx = s.m_ctx;
|
||||
m_solver = s.m_solver;
|
||||
the_model = s.the_model;
|
||||
m_mode = s.m_mode;
|
||||
return *this;
|
||||
}
|
||||
struct cancel_exception {};
|
||||
|
@ -795,11 +840,12 @@ namespace Duality {
|
|||
throw(cancel_exception());
|
||||
}
|
||||
// void set(params const & p) { Z3_solver_set_params(ctx(), m_solver, p); check_error(); }
|
||||
void push() { m_solver->push(); }
|
||||
void pop(unsigned n = 1) { m_solver->pop(n); }
|
||||
void push() { scoped_proof_mode spm(m(),m_mode); m_solver->push(); }
|
||||
void pop(unsigned n = 1) { scoped_proof_mode spm(m(),m_mode); m_solver->pop(n); }
|
||||
// void reset() { Z3_solver_reset(ctx(), m_solver); check_error(); }
|
||||
void add(expr const & e) { m_solver->assert_expr(e); }
|
||||
void add(expr const & e) { scoped_proof_mode spm(m(),m_mode); m_solver->assert_expr(e); }
|
||||
check_result check() {
|
||||
scoped_proof_mode spm(m(),m_mode);
|
||||
checkpoint();
|
||||
lbool r = m_solver->check_sat(0,0);
|
||||
model_ref m;
|
||||
|
@ -808,6 +854,7 @@ namespace Duality {
|
|||
return to_check_result(r);
|
||||
}
|
||||
check_result check_keep_model(unsigned n, expr * const assumptions, unsigned *core_size = 0, expr *core = 0) {
|
||||
scoped_proof_mode spm(m(),m_mode);
|
||||
model old_model(the_model);
|
||||
check_result res = check(n,assumptions,core_size,core);
|
||||
if(the_model == 0)
|
||||
|
@ -815,6 +862,7 @@ namespace Duality {
|
|||
return res;
|
||||
}
|
||||
check_result check(unsigned n, expr * const assumptions, unsigned *core_size = 0, expr *core = 0) {
|
||||
scoped_proof_mode spm(m(),m_mode);
|
||||
checkpoint();
|
||||
std::vector< ::expr *> _assumptions(n);
|
||||
for (unsigned i = 0; i < n; i++) {
|
||||
|
@ -839,6 +887,7 @@ namespace Duality {
|
|||
}
|
||||
#if 0
|
||||
check_result check(expr_vector assumptions) {
|
||||
scoped_proof_mode spm(m(),m_mode);
|
||||
unsigned n = assumptions.size();
|
||||
z3array<Z3_ast> _assumptions(n);
|
||||
for (unsigned i = 0; i < n; i++) {
|
||||
|
@ -863,10 +912,22 @@ namespace Duality {
|
|||
int get_num_decisions();
|
||||
|
||||
void cancel(){
|
||||
scoped_proof_mode spm(m(),m_mode);
|
||||
canceled = true;
|
||||
if(m_solver)
|
||||
m_solver->cancel();
|
||||
}
|
||||
|
||||
unsigned get_scope_level(){ scoped_proof_mode spm(m(),m_mode); return m_solver->get_scope_level();}
|
||||
|
||||
void show();
|
||||
void show_assertion_ids();
|
||||
|
||||
proof get_proof(){
|
||||
scoped_proof_mode spm(m(),m_mode);
|
||||
return proof(ctx(),m_solver->get_proof());
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
#if 0
|
||||
|
@ -1144,6 +1205,9 @@ namespace Duality {
|
|||
inline expr func_decl::operator()(const std::vector<expr> &args) const {
|
||||
return operator()(args.size(),&args[0]);
|
||||
}
|
||||
inline expr func_decl::operator()() const {
|
||||
return operator()(0,0);
|
||||
}
|
||||
inline expr func_decl::operator()(expr const & a) const {
|
||||
return operator()(1,&a);
|
||||
}
|
||||
|
@ -1199,6 +1263,8 @@ namespace Duality {
|
|||
|
||||
inline expr getTerm(){return term;}
|
||||
|
||||
inline std::vector<expr> &getTerms(){return terms;}
|
||||
|
||||
inline std::vector<TermTree *> &getChildren(){
|
||||
return children;
|
||||
}
|
||||
|
@ -1215,6 +1281,8 @@ namespace Duality {
|
|||
}
|
||||
|
||||
inline void setTerm(expr t){term = t;}
|
||||
|
||||
inline void addTerm(expr t){terms.push_back(t);}
|
||||
|
||||
inline void setChildren(const std::vector<TermTree *> & _children){
|
||||
children = _children;
|
||||
|
@ -1231,6 +1299,7 @@ namespace Duality {
|
|||
|
||||
private:
|
||||
expr term;
|
||||
std::vector<expr> terms;
|
||||
std::vector<TermTree *> children;
|
||||
int num;
|
||||
};
|
||||
|
@ -1239,8 +1308,8 @@ namespace Duality {
|
|||
|
||||
class interpolating_solver : public solver {
|
||||
public:
|
||||
interpolating_solver(context &ctx)
|
||||
: solver(ctx)
|
||||
interpolating_solver(context &ctx, bool models = true)
|
||||
: solver(ctx, true, models)
|
||||
{
|
||||
weak_mode = false;
|
||||
}
|
||||
|
@ -1277,6 +1346,7 @@ namespace Duality {
|
|||
void SetWeakInterpolants(bool weak);
|
||||
void SetPrintToFile(const std::string &file_name);
|
||||
|
||||
const std::vector<expr> &GetInterpolationAxioms() {return theory;}
|
||||
const char *profile();
|
||||
|
||||
private:
|
||||
|
@ -1303,6 +1373,21 @@ namespace Duality {
|
|||
typedef double clock_t;
|
||||
clock_t current_time();
|
||||
inline void output_time(std::ostream &os, clock_t time){os << time;}
|
||||
|
||||
template <class X> class uptr {
|
||||
public:
|
||||
X *ptr;
|
||||
uptr(){ptr = 0;}
|
||||
void set(X *_ptr){
|
||||
if(ptr) delete ptr;
|
||||
ptr = _ptr;
|
||||
}
|
||||
X *get(){ return ptr;}
|
||||
~uptr(){
|
||||
if(ptr) delete ptr;
|
||||
}
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
// to make Duality::ast hashable
|
||||
|
@ -1317,7 +1402,7 @@ namespace hash_space {
|
|||
}
|
||||
|
||||
// to make Duality::ast hashable in windows
|
||||
#ifdef WIN32
|
||||
#ifdef _WINDOWS
|
||||
template <> inline
|
||||
size_t stdext::hash_value<Duality::ast >(const Duality::ast& s)
|
||||
{
|
||||
|
@ -1331,7 +1416,20 @@ namespace std {
|
|||
class less<Duality::ast> {
|
||||
public:
|
||||
bool operator()(const Duality::ast &s, const Duality::ast &t) const {
|
||||
return s.raw() < t.raw(); // s.raw()->get_id() < t.raw()->get_id();
|
||||
// return s.raw() < t.raw();
|
||||
return s.raw()->get_id() < t.raw()->get_id();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// to make Duality::ast usable in ordered collections
|
||||
namespace std {
|
||||
template <>
|
||||
class less<Duality::expr> {
|
||||
public:
|
||||
bool operator()(const Duality::expr &s, const Duality::expr &t) const {
|
||||
// return s.raw() < t.raw();
|
||||
return s.raw()->get_id() < t.raw()->get_id();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@ -1348,7 +1446,7 @@ namespace hash_space {
|
|||
}
|
||||
|
||||
// to make Duality::func_decl hashable in windows
|
||||
#ifdef WIN32
|
||||
#ifdef _WINDOWS
|
||||
template <> inline
|
||||
size_t stdext::hash_value<Duality::func_decl >(const Duality::func_decl& s)
|
||||
{
|
||||
|
@ -1362,11 +1460,11 @@ namespace std {
|
|||
class less<Duality::func_decl> {
|
||||
public:
|
||||
bool operator()(const Duality::func_decl &s, const Duality::func_decl &t) const {
|
||||
return s.raw() < t.raw(); // s.raw()->get_id() < t.raw()->get_id();
|
||||
// return s.raw() < t.raw();
|
||||
return s.raw()->get_id() < t.raw()->get_id();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
|
|
@ -18,6 +18,12 @@ Revision History:
|
|||
|
||||
--*/
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable:4996)
|
||||
#pragma warning(disable:4800)
|
||||
#pragma warning(disable:4267)
|
||||
#pragma warning(disable:4101)
|
||||
#endif
|
||||
|
||||
#include "iz3base.h"
|
||||
#include <stdio.h>
|
||||
|
|
|
@ -24,6 +24,16 @@ Revision History:
|
|||
#include "iz3mgr.h"
|
||||
#include "iz3scopes.h"
|
||||
|
||||
namespace hash_space {
|
||||
template <>
|
||||
class hash<func_decl *> {
|
||||
public:
|
||||
size_t operator()(func_decl * const &s) const {
|
||||
return (size_t) s;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/* Base class for interpolators. Includes an AST manager and a scoping
|
||||
object as bases. */
|
||||
|
||||
|
@ -182,6 +192,4 @@ class iz3base : public iz3mgr, public scopes {
|
|||
|
||||
|
||||
|
||||
|
||||
|
||||
#endif
|
||||
|
|
|
@ -17,6 +17,13 @@ Revision History:
|
|||
|
||||
--*/
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable:4996)
|
||||
#pragma warning(disable:4800)
|
||||
#pragma warning(disable:4267)
|
||||
#pragma warning(disable:4101)
|
||||
#endif
|
||||
|
||||
#include "iz3base.h"
|
||||
#include "iz3checker.h"
|
||||
|
||||
|
|
|
@ -51,6 +51,13 @@ public:
|
|||
typedef hash_map<foci2::ast,ast> NodeToAst;
|
||||
NodeToAst node_to_ast; // maps Z3 ast's to foci expressions
|
||||
|
||||
// We only use this for FuncDeclToSymbol, which has no range destructor
|
||||
struct symb_hash {
|
||||
size_t operator()(const symb &s) const {
|
||||
return (size_t) s;
|
||||
}
|
||||
};
|
||||
|
||||
typedef hash_map<symb,foci2::symb> FuncDeclToSymbol;
|
||||
FuncDeclToSymbol func_decl_to_symbol; // maps Z3 func decls to symbols
|
||||
|
||||
|
|
|
@ -42,7 +42,7 @@ Revision History:
|
|||
#include <ext/hash_map>
|
||||
#include <ext/hash_set>
|
||||
#else
|
||||
#ifdef WIN32
|
||||
#ifdef _WINDOWS
|
||||
#define stl_ext stdext
|
||||
#define hash_space std
|
||||
#include <hash_map>
|
||||
|
@ -61,12 +61,12 @@ Revision History:
|
|||
|
||||
// stupid STL doesn't include hash function for class string
|
||||
|
||||
#ifndef WIN32
|
||||
#ifndef _WINDOWS
|
||||
|
||||
namespace stl_ext {
|
||||
template <>
|
||||
class hash<std::string> {
|
||||
stl_ext::hash<char *> H;
|
||||
stl_ext::hash<const char *> H;
|
||||
public:
|
||||
size_t operator()(const std::string &s) const {
|
||||
return H(s.c_str());
|
||||
|
@ -86,7 +86,7 @@ namespace hash_space {
|
|||
};
|
||||
}
|
||||
|
||||
#ifdef WIN32
|
||||
#ifdef _WINDOWS
|
||||
template <> inline
|
||||
size_t stdext::hash_value<std::pair<int,int> >(const std::pair<int,int>& p)
|
||||
{ // hash _Keyval to size_t value one-to-one
|
||||
|
@ -112,7 +112,7 @@ size_t stdext::hash_value<std::pair<T *, T *> >(const std::pair<T *, T *>& p)
|
|||
}
|
||||
#endif
|
||||
|
||||
#ifdef WIN32
|
||||
#ifdef _WINDOWS
|
||||
|
||||
namespace std {
|
||||
template <>
|
||||
|
@ -139,8 +139,9 @@ namespace std {
|
|||
#endif
|
||||
|
||||
|
||||
#ifndef WIN32
|
||||
#ifndef _WINDOWS
|
||||
|
||||
#if 0
|
||||
namespace stl_ext {
|
||||
template <class T>
|
||||
class hash<T *> {
|
||||
|
@ -150,10 +151,11 @@ namespace stl_ext {
|
|||
}
|
||||
};
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef WIN32
|
||||
#ifdef _WINDOWS
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -18,6 +18,14 @@ Revision History:
|
|||
--*/
|
||||
|
||||
/* Copyright 2011 Microsoft Research. */
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable:4996)
|
||||
#pragma warning(disable:4800)
|
||||
#pragma warning(disable:4267)
|
||||
#pragma warning(disable:4101)
|
||||
#endif
|
||||
|
||||
#include <assert.h>
|
||||
#include <algorithm>
|
||||
#include <stdio.h>
|
||||
|
@ -75,15 +83,16 @@ struct frame_reducer : public iz3mgr {
|
|||
}
|
||||
}
|
||||
|
||||
void get_frames(const std::vector<ast> &z3_preds,
|
||||
void get_frames(const std::vector<std::vector<ast> >&z3_preds,
|
||||
const std::vector<int> &orig_parents,
|
||||
std::vector<ast> &assertions,
|
||||
std::vector<std::vector<ast> >&assertions,
|
||||
std::vector<int> &parents,
|
||||
z3pf proof){
|
||||
frames = z3_preds.size();
|
||||
orig_parents_copy = orig_parents;
|
||||
for(unsigned i = 0; i < z3_preds.size(); i++)
|
||||
frame_map[z3_preds[i]] = i;
|
||||
for(unsigned j = 0; j < z3_preds[i].size(); j++)
|
||||
frame_map[z3_preds[i][j]] = i;
|
||||
used_frames.resize(frames);
|
||||
hash_set<ast> memo;
|
||||
get_proof_assumptions_rec(proof,memo,used_frames);
|
||||
|
@ -202,7 +211,7 @@ public:
|
|||
}
|
||||
|
||||
void proof_to_interpolant(z3pf proof,
|
||||
const std::vector<ast> &cnsts,
|
||||
const std::vector<std::vector<ast> > &cnsts,
|
||||
const std::vector<int> &parents,
|
||||
std::vector<ast> &interps,
|
||||
const std::vector<ast> &theory,
|
||||
|
@ -212,11 +221,12 @@ public:
|
|||
test_secondary(cnsts,parents,interps);
|
||||
return;
|
||||
#endif
|
||||
|
||||
profiling::timer_start("Interpolation prep");
|
||||
|
||||
// get rid of frames not used in proof
|
||||
|
||||
std::vector<ast> cnsts_vec;
|
||||
std::vector<std::vector<ast> > cnsts_vec;
|
||||
std::vector<int> parents_vec;
|
||||
frame_reducer fr(*this);
|
||||
fr.get_frames(cnsts,parents,cnsts_vec,parents_vec,proof);
|
||||
|
@ -235,10 +245,7 @@ public:
|
|||
#define BINARY_INTERPOLATION
|
||||
#ifndef BINARY_INTERPOLATION
|
||||
// create a translator
|
||||
std::vector<std::vector<ast> > cnsts_vec_vec(cnsts_vec.size());
|
||||
for(unsigned i = 0; i < cnsts_vec.size(); i++)
|
||||
cnsts_vec_vec[i].push_back(cnsts_vec[i]);
|
||||
iz3translation *tr = iz3translation::create(*this,sp,cnsts_vec_vec,parents_vec,theory);
|
||||
iz3translation *tr = iz3translation::create(*this,sp,cnsts_vec,parents_vec,theory);
|
||||
tr_killer.set(tr);
|
||||
|
||||
// set the translation options, if needed
|
||||
|
@ -273,7 +280,8 @@ public:
|
|||
std::vector<std::vector<ast> > cnsts_vec_vec(2);
|
||||
for(unsigned j = 0; j < cnsts_vec.size(); j++){
|
||||
bool is_A = the_base.in_range(j,rng);
|
||||
cnsts_vec_vec[is_A ? 0 : 1].push_back(cnsts_vec[j]);
|
||||
for(unsigned k = 0; k < cnsts_vec[j].size(); k++)
|
||||
cnsts_vec_vec[is_A ? 0 : 1].push_back(cnsts_vec[j][k]);
|
||||
}
|
||||
|
||||
killme<iz3translation> tr_killer_i;
|
||||
|
@ -308,6 +316,19 @@ public:
|
|||
}
|
||||
|
||||
|
||||
void proof_to_interpolant(z3pf proof,
|
||||
std::vector<ast> &cnsts,
|
||||
const std::vector<int> &parents,
|
||||
std::vector<ast> &interps,
|
||||
const std::vector<ast> &theory,
|
||||
interpolation_options_struct *options = 0
|
||||
){
|
||||
std::vector<std::vector<ast> > cnsts_vec(cnsts.size());
|
||||
for(unsigned i = 0; i < cnsts.size(); i++)
|
||||
cnsts_vec[i].push_back(cnsts[i]);
|
||||
proof_to_interpolant(proof,cnsts_vec,parents,interps,theory,options);
|
||||
}
|
||||
|
||||
// same as above, but represents the tree using an ast
|
||||
|
||||
void proof_to_interpolant(const z3pf &proof,
|
||||
|
@ -322,7 +343,6 @@ public:
|
|||
|
||||
to_parents_vec_representation(_cnsts, tree, cnsts, parents, theory, pos_map);
|
||||
|
||||
|
||||
//use the parents vector representation to compute interpolant
|
||||
proof_to_interpolant(proof,cnsts,parents,interps,theory,options);
|
||||
|
||||
|
@ -397,6 +417,35 @@ void iz3interpolate(ast_manager &_m_manager,
|
|||
interps[i] = itp.uncook(_interps[i]);
|
||||
}
|
||||
|
||||
void iz3interpolate(ast_manager &_m_manager,
|
||||
ast *proof,
|
||||
const ::vector<ptr_vector<ast> > &cnsts,
|
||||
const ::vector<int> &parents,
|
||||
ptr_vector<ast> &interps,
|
||||
const ptr_vector<ast> &theory,
|
||||
interpolation_options_struct * options)
|
||||
{
|
||||
iz3interp itp(_m_manager);
|
||||
if(options)
|
||||
options->apply(itp);
|
||||
std::vector<std::vector<iz3mgr::ast> > _cnsts(cnsts.size());
|
||||
std::vector<int> _parents(parents.size());
|
||||
std::vector<iz3mgr::ast> _interps;
|
||||
std::vector<iz3mgr::ast> _theory(theory.size());
|
||||
for(unsigned i = 0; i < cnsts.size(); i++)
|
||||
for(unsigned j = 0; j < cnsts[i].size(); j++)
|
||||
_cnsts[i].push_back(itp.cook(cnsts[i][j]));
|
||||
for(unsigned i = 0; i < parents.size(); i++)
|
||||
_parents[i] = parents[i];
|
||||
for(unsigned i = 0; i < theory.size(); i++)
|
||||
_theory[i] = itp.cook(theory[i]);
|
||||
iz3mgr::ast _proof = itp.cook(proof);
|
||||
itp.proof_to_interpolant(_proof,_cnsts,_parents,_interps,_theory,options);
|
||||
interps.resize(_interps.size());
|
||||
for(unsigned i = 0; i < interps.size(); i++)
|
||||
interps[i] = itp.uncook(_interps[i]);
|
||||
}
|
||||
|
||||
void iz3interpolate(ast_manager &_m_manager,
|
||||
ast *proof,
|
||||
const ptr_vector<ast> &cnsts,
|
||||
|
@ -461,5 +510,3 @@ void interpolation_options_struct::apply(iz3base &b){
|
|||
b.set_option((*it).first,(*it).second);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -56,6 +56,16 @@ void iz3interpolate(ast_manager &_m_manager,
|
|||
const ptr_vector<ast> &theory,
|
||||
interpolation_options_struct * options = 0);
|
||||
|
||||
/* Same as above, but each constraint is a vector of formulas. */
|
||||
|
||||
void iz3interpolate(ast_manager &_m_manager,
|
||||
ast *proof,
|
||||
const vector<ptr_vector<ast> > &cnsts,
|
||||
const ::vector<int> &parents,
|
||||
ptr_vector<ast> &interps,
|
||||
const ptr_vector<ast> &theory,
|
||||
interpolation_options_struct * options = 0);
|
||||
|
||||
/* Compute an interpolant from a proof. This version uses the ast
|
||||
representation, for compatibility with the new API. */
|
||||
|
||||
|
|
91
src/interp/iz3mgr.cpp
Normal file → Executable file
|
@ -18,6 +18,15 @@ Revision History:
|
|||
--*/
|
||||
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable:4996)
|
||||
#pragma warning(disable:4800)
|
||||
#pragma warning(disable:4267)
|
||||
#pragma warning(disable:4101)
|
||||
#pragma warning(disable:4805)
|
||||
#pragma warning(disable:4800)
|
||||
#endif
|
||||
|
||||
#include "iz3mgr.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
@ -172,7 +181,7 @@ iz3mgr::ast iz3mgr::make_quant(opr op, const std::vector<ast> &bvs, ast &body){
|
|||
|
||||
|
||||
std::vector<symbol> names;
|
||||
std::vector<sort *> types;
|
||||
std::vector<class sort *> types;
|
||||
std::vector<expr *> bound_asts;
|
||||
unsigned num_bound = bvs.size();
|
||||
|
||||
|
@ -190,7 +199,7 @@ iz3mgr::ast iz3mgr::make_quant(opr op, const std::vector<ast> &bvs, ast &body){
|
|||
op == Forall,
|
||||
names.size(), &types[0], &names[0], abs_body.get(),
|
||||
0,
|
||||
symbol(),
|
||||
symbol("itp"),
|
||||
symbol(),
|
||||
0, 0,
|
||||
0, 0
|
||||
|
@ -240,6 +249,9 @@ iz3mgr::ast iz3mgr::clone(const ast &t, const std::vector<ast> &_args){
|
|||
|
||||
|
||||
void iz3mgr::show(ast t){
|
||||
if(t.null()){
|
||||
std::cout << "(null)" << std::endl;
|
||||
}
|
||||
params_ref p;
|
||||
p.set_bool("flat_assoc",false);
|
||||
std::cout << mk_pp(t.raw(), m(), p) << std::endl;
|
||||
|
@ -485,7 +497,7 @@ void iz3mgr::get_farkas_coeffs(const ast &proof, std::vector<ast>& coeffs){
|
|||
get_farkas_coeffs(proof,rats);
|
||||
coeffs.resize(rats.size());
|
||||
for(unsigned i = 0; i < rats.size(); i++){
|
||||
sort *is = m().mk_sort(m_arith_fid, INT_SORT);
|
||||
class sort *is = m().mk_sort(m_arith_fid, INT_SORT);
|
||||
ast coeff = cook(m_arith_util.mk_numeral(rats[i],is));
|
||||
coeffs[i] = coeff;
|
||||
}
|
||||
|
@ -640,9 +652,9 @@ void iz3mgr::get_assign_bounds_rule_coeffs(const ast &proof, std::vector<rationa
|
|||
|
||||
/** Set P to P + cQ, where P and Q are linear inequalities. Assumes P is 0 <= y or 0 < y. */
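// Illustrative reading of the code below: combining P: 0 <= x with Q: 0 <= 3 - x (i.e. x <= 3)
// and coefficient c = 2 yields 0 <= x + 2*(3 - x), i.e. 0 <= 6 - x; strictness of either
// premise carries over to the result. With round_off set (integer reasoning), a strict
// premise a < b may instead be used as a <= b - 1 before scaling by c.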
|
||||
|
||||
void iz3mgr::linear_comb(ast &P, const ast &c, const ast &Q){
|
||||
void iz3mgr::linear_comb(ast &P, const ast &c, const ast &Q, bool round_off){
|
||||
ast Qrhs;
|
||||
bool strict = op(P) == Lt;
|
||||
bool qstrict = false;
|
||||
if(is_not(Q)){
|
||||
ast nQ = arg(Q,0);
|
||||
switch(op(nQ)){
|
||||
|
@ -654,11 +666,11 @@ void iz3mgr::linear_comb(ast &P, const ast &c, const ast &Q){
|
|||
break;
|
||||
case Geq:
|
||||
Qrhs = make(Sub,arg(nQ,1),arg(nQ,0));
|
||||
strict = true;
|
||||
qstrict = true;
|
||||
break;
|
||||
case Leq:
|
||||
Qrhs = make(Sub,arg(nQ,0),arg(nQ,1));
|
||||
strict = true;
|
||||
qstrict = true;
|
||||
break;
|
||||
default:
|
||||
throw "not an inequality";
|
||||
|
@ -674,28 +686,34 @@ void iz3mgr::linear_comb(ast &P, const ast &c, const ast &Q){
|
|||
break;
|
||||
case Lt:
|
||||
Qrhs = make(Sub,arg(Q,1),arg(Q,0));
|
||||
strict = true;
|
||||
qstrict = true;
|
||||
break;
|
||||
case Gt:
|
||||
Qrhs = make(Sub,arg(Q,0),arg(Q,1));
|
||||
strict = true;
|
||||
qstrict = true;
|
||||
break;
|
||||
default:
|
||||
throw "not an inequality";
|
||||
}
|
||||
}
|
||||
bool pstrict = op(P) == Lt;
|
||||
if(qstrict && round_off && (pstrict || !(c == make_int(rational(1))))){
|
||||
Qrhs = make(Sub,Qrhs,make_int(rational(1)));
|
||||
qstrict = false;
|
||||
}
|
||||
Qrhs = make(Times,c,Qrhs);
|
||||
bool strict = pstrict || qstrict;
|
||||
if(strict)
|
||||
P = make(Lt,arg(P,0),make(Plus,arg(P,1),Qrhs));
|
||||
else
|
||||
P = make(Leq,arg(P,0),make(Plus,arg(P,1),Qrhs));
|
||||
}
|
||||
|
||||
iz3mgr::ast iz3mgr::sum_inequalities(const std::vector<ast> &coeffs, const std::vector<ast> &ineqs){
|
||||
iz3mgr::ast iz3mgr::sum_inequalities(const std::vector<ast> &coeffs, const std::vector<ast> &ineqs, bool round_off){
|
||||
ast zero = make_int("0");
|
||||
ast thing = make(Leq,zero,zero);
|
||||
for(unsigned i = 0; i < ineqs.size(); i++){
|
||||
linear_comb(thing,coeffs[i],ineqs[i]);
|
||||
linear_comb(thing,coeffs[i],ineqs[i], round_off);
|
||||
}
|
||||
thing = simplify_ineq(thing);
|
||||
return thing;
|
||||
|
@ -761,6 +779,19 @@ int iz3mgr::occurs_in(ast var, ast e){
|
|||
}
|
||||
|
||||
|
||||
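// Descriptive note on the helper below: given an equation x = y in which x is a sum containing
// the variable v, it solves for v as v = y - (x - v); e.g. from v + a + b = y it returns
// (after simplification) v = y - (a + b).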
bool iz3mgr::solve_arith(const ast &v, const ast &x, const ast &y, ast &res){
|
||||
if(op(x) == Plus){
|
||||
int n = num_args(x);
|
||||
for(int i = 0; i < n; i++){
|
||||
if(arg(x,i) == v){
|
||||
res = z3_simplify(make(Sub, y, make(Sub, x, v)));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// find a controlling equality for a given variable v in a term
// a controlling equality is of the form v = t, which, being
// false would force the formula to have the specified truth value
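// For example, with truth == false and e of the form (v = t) && phi, the equality v = t is
// controlling: if it were false, e would be false as required.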
|
@ -774,6 +805,9 @@ iz3mgr::ast iz3mgr::cont_eq(stl_ext::hash_set<ast> &cont_eq_memo, bool truth, as
|
|||
if(!truth && op(e) == Equal){
|
||||
if(arg(e,0) == v) return(arg(e,1));
|
||||
if(arg(e,1) == v) return(arg(e,0));
|
||||
ast res;
|
||||
if(solve_arith(v,arg(e,0),arg(e,1),res)) return res;
|
||||
if(solve_arith(v,arg(e,1),arg(e,0),res)) return res;
|
||||
}
|
||||
if((!truth && op(e) == And) || (truth && op(e) == Or)){
|
||||
int nargs = num_args(e);
|
||||
|
@ -815,11 +849,35 @@ iz3mgr::ast iz3mgr::subst(ast var, ast t, ast e){
|
|||
return subst(memo,var,t,e);
|
||||
}
|
||||
|
||||
iz3mgr::ast iz3mgr::subst(stl_ext::hash_map<ast,ast> &subst_memo,ast e){
|
||||
std::pair<ast,ast> foo(e,ast());
|
||||
std::pair<hash_map<ast,ast>::iterator,bool> bar = subst_memo.insert(foo);
|
||||
ast &res = bar.first->second;
|
||||
if(bar.second){
|
||||
int nargs = num_args(e);
|
||||
std::vector<ast> args(nargs);
|
||||
for(int i = 0; i < nargs; i++)
|
||||
args[i] = subst(subst_memo,arg(e,i));
|
||||
opr f = op(e);
|
||||
if(f == Equal && args[0] == args[1]) res = mk_true();
|
||||
else res = clone(e,args);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
// apply a quantifier to a formula, with some optimizations
|
||||
// 1) bound variable does not occur -> no quantifier
|
||||
// 2) bound variable must be equal to some term -> substitute
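// Illustrative cases: if var does not occur in e, "forall var. e" is just e (case 1);
// and "exists x. (x = t && phi(x))" has the controlling equality x = t, so t can be
// substituted for x instead of introducing the quantifier (case 2).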
|
||||
|
||||
iz3mgr::ast iz3mgr::apply_quant(opr quantifier, ast var, ast e){
|
||||
if((quantifier == Forall && op(e) == And)
|
||||
|| (quantifier == Exists && op(e) == Or)){
|
||||
int n = num_args(e);
|
||||
std::vector<ast> args(n);
|
||||
for(int i = 0; i < n; i++)
|
||||
args[i] = apply_quant(quantifier,var,arg(e,i));
|
||||
return make(op(e),args);
|
||||
}
|
||||
if(!occurs_in(var,e))return e;
|
||||
hash_set<ast> cont_eq_memo;
|
||||
ast cterm = cont_eq(cont_eq_memo, quantifier == Forall, var, e);
|
||||
|
@ -829,3 +887,14 @@ iz3mgr::ast iz3mgr::apply_quant(opr quantifier, ast var, ast e){
|
|||
std::vector<ast> bvs; bvs.push_back(var);
|
||||
return make_quant(quantifier,bvs,e);
|
||||
}
|
||||
|
||||
#if 0
|
||||
void iz3mgr::get_bound_substitutes(stl_ext::hash_map<ast,bool> &memo, const ast &e, const ast &var, std::vector<ast> &substs){
|
||||
std::pair<ast,bool> foo(e,false);
|
||||
std::pair<hash_map<ast,bool>::iterator,bool> bar = memo.insert(foo);
|
||||
if(bar.second){
|
||||
if(op(e) ==
|
||||
}
|
||||
|
||||
}
|
||||
#endif
|
||||
|
|
27
src/interp/iz3mgr.h
Normal file → Executable file
|
@ -22,6 +22,7 @@ Revision History:
|
|||
|
||||
|
||||
#include <assert.h>
|
||||
#include <vector>
|
||||
#include "iz3hash.h"
|
||||
|
||||
#include"well_sorted.h"
|
||||
|
@ -65,7 +66,7 @@ class ast_i {
|
|||
return _ast == other._ast;
|
||||
}
|
||||
bool lt(const ast_i &other) const {
|
||||
return _ast < other._ast;
|
||||
return _ast->get_id() < other._ast->get_id();
|
||||
}
|
||||
friend bool operator==(const ast_i &x, const ast_i&y){
|
||||
return x.eq(y);
|
||||
|
@ -76,7 +77,7 @@ class ast_i {
|
|||
friend bool operator<(const ast_i &x, const ast_i&y){
|
||||
return x.lt(y);
|
||||
}
|
||||
size_t hash() const {return (size_t)_ast;}
|
||||
size_t hash() const {return _ast->get_id();}
|
||||
bool null() const {return !_ast;}
|
||||
};
|
||||
|
||||
|
@ -126,7 +127,7 @@ namespace hash_space {
|
|||
}
|
||||
|
||||
// to make ast_r hashable in windows
|
||||
#ifdef WIN32
|
||||
#ifdef _WINDOWS
|
||||
template <> inline
|
||||
size_t stdext::hash_value<ast_r >(const ast_r& s)
|
||||
{
|
||||
|
@ -140,7 +141,8 @@ namespace std {
|
|||
class less<ast_r> {
|
||||
public:
|
||||
bool operator()(const ast_r &s, const ast_r &t) const {
|
||||
return s.raw() < t.raw(); // s.raw()->get_id() < t.raw()->get_id();
|
||||
// return s.raw() < t.raw();
|
||||
return s.raw()->get_id() < t.raw()->get_id();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@ -261,6 +263,7 @@ class iz3mgr {
|
|||
default:;
|
||||
}
|
||||
assert(0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ast arg(const ast &t, int i){
|
||||
|
@ -359,6 +362,12 @@ class iz3mgr {
|
|||
return fid == m().get_basic_family_id() && k == BOOL_SORT;
|
||||
}
|
||||
|
||||
bool is_array_type(type t){
|
||||
family_id fid = to_sort(t)->get_family_id();
|
||||
decl_kind k = to_sort(t)->get_decl_kind();
|
||||
return fid == m_array_fid && k == ARRAY_SORT;
|
||||
}
|
||||
|
||||
type get_range_type(symb s){
|
||||
return to_func_decl(s)->get_range();
|
||||
}
|
||||
|
@ -599,9 +608,9 @@ class iz3mgr {
|
|||
return d;
|
||||
}
|
||||
|
||||
void linear_comb(ast &P, const ast &c, const ast &Q);
|
||||
void linear_comb(ast &P, const ast &c, const ast &Q, bool round_off = false);
|
||||
|
||||
ast sum_inequalities(const std::vector<ast> &coeffs, const std::vector<ast> &ineqs);
|
||||
ast sum_inequalities(const std::vector<ast> &coeffs, const std::vector<ast> &ineqs, bool round_off = false);
|
||||
|
||||
ast simplify_ineq(const ast &ineq){
|
||||
ast res = make(op(ineq),arg(ineq,0),z3_simplify(arg(ineq,1)));
|
||||
|
@ -631,6 +640,9 @@ class iz3mgr {
|
|||
|
||||
ast subst(ast var, ast t, ast e);
|
||||
|
||||
// apply a substitution defined by a map
|
||||
ast subst(stl_ext::hash_map<ast,ast> &map, ast e);
|
||||
|
||||
// apply a quantifier to a formula, with some optimizations
|
||||
// 1) bound variable does not occur -> no quantifier
|
||||
// 2) bound variable must be equal to some term -> substitute
|
||||
|
@ -683,13 +695,14 @@ class iz3mgr {
|
|||
|
||||
protected:
|
||||
ast_manager &m_manager;
|
||||
int occurs_in(ast var, ast e);
|
||||
|
||||
private:
|
||||
ast mki(family_id fid, decl_kind sk, int n, raw_ast **args);
|
||||
ast make(opr op, int n, raw_ast **args);
|
||||
ast make(symb sym, int n, raw_ast **args);
|
||||
int occurs_in1(stl_ext::hash_map<ast,bool> &occurs_in_memo, ast var, ast e);
|
||||
int occurs_in(ast var, ast e);
|
||||
bool solve_arith(const ast &v, const ast &x, const ast &y, ast &res);
|
||||
ast cont_eq(stl_ext::hash_set<ast> &cont_eq_memo, bool truth, ast v, ast e);
|
||||
ast subst(stl_ext::hash_map<ast,ast> &subst_memo, ast var, ast t, ast e);
|
||||
|
||||
|
|
|
@ -40,24 +40,38 @@ Revision History:
|
|||
using namespace stl_ext;
|
||||
#endif
|
||||
|
||||
#ifndef WIN32
|
||||
// We promise not to use this for hash_map with range destructor
|
||||
namespace stl_ext {
|
||||
template <>
|
||||
class hash<expr *> {
|
||||
public:
|
||||
size_t operator()(const expr *p) const {
|
||||
return (size_t) p;
|
||||
}
|
||||
};
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
// TBD: algebraic data-types declarations will not be printed.
|
||||
class free_func_visitor {
|
||||
ast_manager& m;
|
||||
func_decl_set m_funcs;
|
||||
obj_hashtable<sort> m_sorts;
|
||||
obj_hashtable<class sort> m_sorts;
|
||||
public:
|
||||
free_func_visitor(ast_manager& m): m(m) {}
|
||||
void operator()(var * n) { }
|
||||
void operator()(app * n) {
|
||||
m_funcs.insert(n->get_decl());
|
||||
sort* s = m.get_sort(n);
|
||||
class sort* s = m.get_sort(n);
|
||||
if (s->get_family_id() == null_family_id) {
|
||||
m_sorts.insert(s);
|
||||
}
|
||||
}
|
||||
void operator()(quantifier * n) { }
|
||||
func_decl_set& funcs() { return m_funcs; }
|
||||
obj_hashtable<sort>& sorts() { return m_sorts; }
|
||||
obj_hashtable<class sort>& sorts() { return m_sorts; }
|
||||
};
|
||||
|
||||
class iz3pp_helper : public iz3mgr {
|
||||
|
@ -132,8 +146,8 @@ void iz3pp(ast_manager &m,
|
|||
func_decl_set &funcs = visitor.funcs();
|
||||
func_decl_set::iterator it = funcs.begin(), end = funcs.end();
|
||||
|
||||
obj_hashtable<sort>& sorts = visitor.sorts();
|
||||
obj_hashtable<sort>::iterator sit = sorts.begin(), send = sorts.end();
|
||||
obj_hashtable<class sort>& sorts = visitor.sorts();
|
||||
obj_hashtable<class sort>::iterator sit = sorts.begin(), send = sorts.end();
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -17,6 +17,13 @@ Revision History:
|
|||
|
||||
--*/
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable:4996)
|
||||
#pragma warning(disable:4800)
|
||||
#pragma warning(disable:4267)
|
||||
#pragma warning(disable:4101)
|
||||
#endif
|
||||
|
||||
#include "iz3profiling.h"
|
||||
|
||||
#include <map>
|
||||
|
|
|
@ -18,6 +18,12 @@ Revision History:
|
|||
--*/
|
||||
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable:4996)
|
||||
#pragma warning(disable:4800)
|
||||
#pragma warning(disable:4267)
|
||||
#pragma warning(disable:4101)
|
||||
#endif
|
||||
|
||||
#include "iz3proof.h"
|
||||
#include "iz3profiling.h"
|
||||
|
|
844
src/interp/iz3proof_itp.cpp
Normal file → Executable file
File diff suppressed because it is too large
|
@ -70,6 +70,9 @@ class iz3proof_itp : public iz3mgr {
|
|||
/** Make an axiom node. The conclusion must be an instance of an axiom. */
|
||||
virtual node make_axiom(const std::vector<ast> &conclusion) = 0;
|
||||
|
||||
/** Make an axiom node. The conclusion must be an instance of an axiom. Localize axiom instance to range*/
|
||||
virtual node make_axiom(const std::vector<ast> &conclusion, prover::range) = 0;
|
||||
|
||||
/** Make a Contra node. This rule takes a derivation of the form
|
||||
Gamma |- False and produces |- \/~Gamma. */
|
||||
|
||||
|
|
|
@ -19,6 +19,8 @@ Revision History:
|
|||
|
||||
#include <assert.h>
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include "iz3scopes.h"
|
||||
|
||||
|
||||
|
|
|
@ -17,6 +17,13 @@ Revision History:
|
|||
|
||||
--*/
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable:4996)
|
||||
#pragma warning(disable:4800)
|
||||
#pragma warning(disable:4267)
|
||||
#pragma warning(disable:4101)
|
||||
#endif
|
||||
|
||||
#include "iz3translate.h"
|
||||
#include "iz3proof.h"
|
||||
#include "iz3profiling.h"
|
||||
|
@ -109,36 +116,49 @@ public:
|
|||
symbols and assign each to a frame. The assignment is heuristic.
|
||||
*/
|
||||
|
||||
void scan_skolems_rec(hash_set<ast> &memo, const ast &proof){
|
||||
std::pair<hash_set<ast>::iterator,bool> bar = memo.insert(proof);
|
||||
if(!bar.second)
|
||||
return;
|
||||
int scan_skolems_rec(hash_map<ast,int> &memo, const ast &proof, int frame){
|
||||
std::pair<ast,int> foo(proof,INT_MAX);
|
||||
std::pair<AstToInt::iterator, bool> bar = memo.insert(foo);
|
||||
int &res = bar.first->second;
|
||||
if(!bar.second) return res;
|
||||
pfrule dk = pr(proof);
|
||||
if(dk == PR_SKOLEMIZE){
|
||||
if(dk == PR_ASSERTED){
|
||||
ast ass = conc(proof);
|
||||
res = frame_of_assertion(ass);
|
||||
}
|
||||
else if(dk == PR_SKOLEMIZE){
|
||||
ast quanted = arg(conc(proof),0);
|
||||
if(op(quanted) == Not)
|
||||
quanted = arg(quanted,0);
|
||||
range r = ast_range(quanted);
|
||||
if(range_is_empty(r))
|
||||
r = ast_scope(quanted);
|
||||
// range r = ast_range(quanted);
|
||||
// if(range_is_empty(r))
|
||||
range r = ast_scope(quanted);
|
||||
if(range_is_empty(r))
|
||||
throw "can't skolemize";
|
||||
int frame = range_max(r);
|
||||
if(frame == INT_MAX || !in_range(frame,r))
|
||||
frame = range_max(r); // this is desperation -- may fail
|
||||
if(frame >= frames) frame = frames - 1;
|
||||
add_frame_range(frame,arg(conc(proof),1));
|
||||
r = ast_scope(arg(conc(proof),1));
|
||||
}
|
||||
else if(dk==PR_MODUS_PONENS_OEQ){
|
||||
frame = scan_skolems_rec(memo,prem(proof,0),frame);
|
||||
scan_skolems_rec(memo,prem(proof,1),frame);
|
||||
}
|
||||
else {
|
||||
unsigned nprems = num_prems(proof);
|
||||
for(unsigned i = 0; i < nprems; i++){
|
||||
scan_skolems_rec(memo,prem(proof,i));
|
||||
int bar = scan_skolems_rec(memo,prem(proof,i),frame);
|
||||
if(res == INT_MAX || res == bar) res = bar;
|
||||
else if(bar != INT_MAX) res = -1;
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
void scan_skolems(const ast &proof){
|
||||
hash_set<ast> memo;
|
||||
scan_skolems_rec(memo,proof);
|
||||
hash_map<ast,int> memo;
|
||||
scan_skolems_rec(memo,proof, INT_MAX);
|
||||
}
|
||||
|
||||
// determine locality of a proof term
|
||||
|
@ -168,6 +188,15 @@ public:
|
|||
get_Z3_lits(con, lits);
|
||||
iproof->make_axiom(lits);
|
||||
}
|
||||
#ifdef LOCALIZATION_KLUDGE
|
||||
else if(dk == PR_MODUS_PONENS && pr(prem(proof,0)) == PR_QUANT_INST
|
||||
&& get_locality_rec(prem(proof,1)) == INT_MAX){
|
||||
std::vector<ast> lits;
|
||||
ast con = conc(proof);
|
||||
get_Z3_lits(con, lits);
|
||||
iproof->make_axiom(lits);
|
||||
}
|
||||
#endif
|
||||
else {
|
||||
unsigned nprems = num_prems(proof);
|
||||
for(unsigned i = 0; i < nprems; i++){
|
||||
|
@ -1066,7 +1095,7 @@ public:
|
|||
my_cons.push_back(mk_not(arg(con,i)));
|
||||
my_coeffs.push_back(farkas_coeffs[i]);
|
||||
}
|
||||
ast farkas_con = normalize_inequality(sum_inequalities(my_coeffs,my_cons));
|
||||
ast farkas_con = normalize_inequality(sum_inequalities(my_coeffs,my_cons,true /* round_off */));
|
||||
my_cons.push_back(mk_not(farkas_con));
|
||||
my_coeffs.push_back(make_int("1"));
|
||||
std::vector<Iproof::node> my_hyps;
|
||||
|
@ -1090,7 +1119,7 @@ public:
|
|||
my_cons.push_back(conc(prem(proof,i-1)));
|
||||
my_coeffs.push_back(farkas_coeffs[i]);
|
||||
}
|
||||
ast farkas_con = normalize_inequality(sum_inequalities(my_coeffs,my_cons));
|
||||
ast farkas_con = normalize_inequality(sum_inequalities(my_coeffs,my_cons,true /* round_off */));
|
||||
std::vector<Iproof::node> my_hyps;
|
||||
for(int i = 1; i < nargs; i++)
|
||||
my_hyps.push_back(prems[i-1]);
|
||||
|
@ -1251,6 +1280,84 @@ public:
|
|||
return make(Plus,args);
|
||||
}
|
||||
|
||||
|
||||
ast replace_summands_with_fresh_vars(const ast &t, hash_map<ast,ast> &map){
|
||||
if(op(t) == Plus){
|
||||
int nargs = num_args(t);
|
||||
std::vector<ast> args(nargs);
|
||||
for(int i = 0; i < nargs; i++)
|
||||
args[i] = replace_summands_with_fresh_vars(arg(t,i),map);
|
||||
return make(Plus,args);
|
||||
}
|
||||
if(op(t) == Times)
|
||||
return make(Times,arg(t,0),replace_summands_with_fresh_vars(arg(t,1),map));
|
||||
if(map.find(t) == map.end())
|
||||
map[t] = mk_fresh_constant("@s",get_type(t));
|
||||
return map[t];
|
||||
}
|
||||
|
||||
ast painfully_normalize_ineq(const ast &ineq, hash_map<ast,ast> &map){
|
||||
ast res = normalize_inequality(ineq);
|
||||
ast lhs = arg(res,0);
|
||||
lhs = replace_summands_with_fresh_vars(lhs,map);
|
||||
res = make(op(res),SortSum(lhs),arg(res,1));
|
||||
return res;
|
||||
}
|
||||
|
||||
Iproof::node painfully_reconstruct_farkas(const std::vector<ast> &prems, const std::vector<Iproof::node> &pfs, const ast &con){
|
||||
int nprems = prems.size();
|
||||
std::vector<ast> pcons(nprems),npcons(nprems);
|
||||
hash_map<ast,ast> pcon_to_pf, npcon_to_pcon, pain_map;
|
||||
for(int i = 0; i < nprems; i++){
|
||||
pcons[i] = conc(prems[i]);
|
||||
npcons[i] = painfully_normalize_ineq(pcons[i],pain_map);
|
||||
pcon_to_pf[npcons[i]] = pfs[i];
|
||||
npcon_to_pcon[npcons[i]] = pcons[i];
|
||||
}
|
||||
// ast leq = make(Leq,arg(con,0),arg(con,1));
|
||||
ast ncon = painfully_normalize_ineq(mk_not(con),pain_map);
|
||||
pcons.push_back(mk_not(con));
|
||||
npcons.push_back(ncon);
|
||||
// ast assumps = make(And,pcons);
|
||||
ast new_proof;
|
||||
if(is_sat(npcons,new_proof))
|
||||
throw "Proof error!";
|
||||
pfrule dk = pr(new_proof);
|
||||
int nnp = num_prems(new_proof);
|
||||
std::vector<Iproof::node> my_prems;
|
||||
std::vector<ast> farkas_coeffs, my_pcons;
|
||||
|
||||
if(dk == PR_TH_LEMMA
|
||||
&& get_theory_lemma_theory(new_proof) == ArithTheory
|
||||
&& get_theory_lemma_kind(new_proof) == FarkasKind)
|
||||
get_farkas_coeffs(new_proof,farkas_coeffs);
|
||||
else if(dk == PR_UNIT_RESOLUTION && nnp == 2){
|
||||
for(int i = 0; i < nprems; i++)
|
||||
farkas_coeffs.push_back(make_int(rational(1)));
|
||||
}
|
||||
else
|
||||
throw "cannot reconstruct farkas proof";
|
||||
|
||||
for(int i = 0; i < nnp; i++){
|
||||
ast p = conc(prem(new_proof,i));
|
||||
p = really_normalize_ineq(p);
|
||||
if(pcon_to_pf.find(p) != pcon_to_pf.end()){
|
||||
my_prems.push_back(pcon_to_pf[p]);
|
||||
my_pcons.push_back(npcon_to_pcon[p]);
|
||||
}
|
||||
else if(p == ncon){
|
||||
my_prems.push_back(iproof->make_hypothesis(mk_not(con)));
|
||||
my_pcons.push_back(mk_not(con));
|
||||
}
|
||||
else
|
||||
throw "cannot reconstruct farkas proof";
|
||||
}
|
||||
Iproof::node res = iproof->make_farkas(mk_false(),my_prems,my_pcons,farkas_coeffs);
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
|
||||
ast really_normalize_ineq(const ast &ineq){
|
||||
ast res = normalize_inequality(ineq);
|
||||
res = make(op(res),SortSum(arg(res,0)),arg(res,1));
|
||||
|
@ -1289,7 +1396,7 @@ public:
|
|||
farkas_coeffs.push_back(make_int(rational(1)));
|
||||
}
|
||||
else
|
||||
throw "cannot reconstruct farkas proof";
|
||||
return painfully_reconstruct_farkas(prems,pfs,con);
|
||||
|
||||
for(int i = 0; i < nnp; i++){
|
||||
ast p = conc(prem(new_proof,i));
|
||||
|
@ -1364,6 +1471,18 @@ public:
|
|||
return eq2;
|
||||
}
|
||||
|
||||
bool get_store_array(const ast &t, ast &res){
|
||||
if(op(t) == Store){
|
||||
res = t;
|
||||
return true;
|
||||
}
|
||||
int nargs = num_args(t);
|
||||
for(int i = 0; i < nargs; i++)
|
||||
if(get_store_array(arg(t,i),res))
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
// translate a Z3 proof term into interpolating proof system
|
||||
|
||||
Iproof::node translate_main(ast proof, bool expect_clause = true){
|
||||
|
@ -1420,9 +1539,11 @@ public:
|
|||
lits.push_back(from_ast(con));
|
||||
|
||||
// pattern match some idioms
|
||||
if(dk == PR_MODUS_PONENS && pr(prem(proof,0)) == PR_QUANT_INST && pr(prem(proof,1)) == PR_REWRITE ) {
|
||||
res = iproof->make_axiom(lits);
|
||||
return res;
|
||||
if(dk == PR_MODUS_PONENS && pr(prem(proof,0)) == PR_QUANT_INST){
|
||||
if(get_locality_rec(prem(proof,1)) == INT_MAX) {
|
||||
res = iproof->make_axiom(lits);
|
||||
return res;
|
||||
}
|
||||
}
|
||||
if(dk == PR_MODUS_PONENS && expect_clause && op(con) == Or){
|
||||
Iproof::node clause = translate_main(prem(proof,0),true);
|
||||
|
@ -1433,12 +1554,20 @@ public:
|
|||
if(dk == PR_MODUS_PONENS && expect_clause && op(con) == Or)
|
||||
std::cout << "foo!\n";
|
||||
|
||||
#if 0
|
||||
if(1 && dk == PR_TRANSITIVITY && pr(prem(proof,1)) == PR_COMMUTATIVITY){
|
||||
Iproof::node clause = translate_main(prem(proof,0),true);
|
||||
res = make(commute,clause,conc(prem(proof,0))); // HACK -- we depend on Iproof::node being same as ast.
|
||||
return res;
|
||||
}
|
||||
|
||||
if(1 && dk == PR_TRANSITIVITY && pr(prem(proof,0)) == PR_COMMUTATIVITY){
|
||||
Iproof::node clause = translate_main(prem(proof,1),true);
|
||||
res = make(commute,clause,conc(prem(proof,1))); // HACK -- we depend on Iproof::node being same as ast.
|
||||
return res;
|
||||
}
|
||||
#endif
|
||||
|
||||
if(dk == PR_TRANSITIVITY && is_eq_propagate(prem(proof,1))){
|
||||
try {
|
||||
res = CombineEqPropagate(proof);
|
||||
|
@ -1448,6 +1577,21 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
/* This is the symmetry rule for ~=, that is, it takes x ~= y and yields y ~= x.
The proof idiom uses commutativity, monotonicity and mp, but we replace it here
with symmetry and resolution, that is, we prove y = x |- x = y, then resolve
with the proof of ~(x=y) to get ~(y=x). */
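/* For instance, from the hypothesis y = x, make_symmetry below yields a proof of x = y,
   which is then resolved against the translated premise ~(x = y) to conclude ~(y = x). */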
|
||||
if(dk == PR_MODUS_PONENS && pr(prem(proof,1)) == PR_MONOTONICITY && pr(prem(prem(proof,1),0)) == PR_COMMUTATIVITY && num_prems(prem(proof,1)) == 1){
|
||||
Iproof::node ante = translate_main(prem(proof,0),false);
|
||||
ast eq0 = arg(conc(prem(prem(proof,1),0)),0);
|
||||
ast eq1 = arg(conc(prem(prem(proof,1),0)),1);
|
||||
Iproof::node eq1hy = iproof->make_hypothesis(eq1);
|
||||
Iproof::node eq0pf = iproof->make_symmetry(eq0,eq1,eq1hy);
|
||||
std::vector<ast> clause; // just a dummy
|
||||
res = iproof->make_resolution(eq0,clause,ante,eq0pf);
|
||||
return res;
|
||||
}
|
||||
|
||||
// translate all the premises
|
||||
std::vector<Iproof::node> args(nprems);
|
||||
for(unsigned i = 0; i < nprems; i++)
|
||||
|
@ -1578,9 +1722,14 @@ public:
|
|||
throw unsupported();
|
||||
}
|
||||
break;
|
||||
case ArrayTheory: // nothing fancy for this
|
||||
res = iproof->make_axiom(lits);
|
||||
case ArrayTheory: {// nothing fancy for this
|
||||
ast store_array;
|
||||
if(get_store_array(con,store_array))
|
||||
res = iproof->make_axiom(lits,ast_scope(store_array));
|
||||
else
|
||||
res = iproof->make_axiom(lits); // for array extensionality axiom
|
||||
break;
|
||||
}
|
||||
default:
|
||||
throw unsupported();
|
||||
}
|
||||
|
@ -1598,6 +1747,16 @@ public:
|
|||
res = iproof->make_axiom(lits);
|
||||
break;
|
||||
}
|
||||
case PR_IFF_TRUE: { // turns p into p <-> true, noop for us
|
||||
res = args[0];
|
||||
break;
|
||||
}
|
||||
case PR_COMMUTATIVITY: {
|
||||
ast comm_equiv = make(op(con),arg(con,0),arg(con,0));
|
||||
ast pf = iproof->make_reflexivity(comm_equiv);
|
||||
res = make(commute,pf,comm_equiv);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
assert(0 && "translate_main: unsupported proof rule");
|
||||
throw unsupported();
|
||||
|
|
|
@ -20,6 +20,14 @@ Revision History:
|
|||
--*/
|
||||
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable:4996)
|
||||
#pragma warning(disable:4800)
|
||||
#pragma warning(disable:4267)
|
||||
#pragma warning(disable:4101)
|
||||
#pragma warning(disable:4390)
|
||||
#endif
|
||||
|
||||
#include "iz3translate.h"
|
||||
#include "iz3proof.h"
|
||||
#include "iz3profiling.h"
|
||||
|
@ -38,9 +46,29 @@ Revision History:
|
|||
using namespace stl_ext;
|
||||
#endif
|
||||
|
||||
#ifndef WIN32
|
||||
|
||||
/* This can introduce an address dependency if the range type of hash_map has
|
||||
a destructor. Since the code in this file is not used and only here for
|
||||
historical comparisons, we allow this non-determinism.
|
||||
*/
|
||||
namespace stl_ext {
|
||||
template <class T>
|
||||
class hash<T *> {
|
||||
public:
|
||||
size_t operator()(const T *p) const {
|
||||
return (size_t) p;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
static int lemma_count = 0;
|
||||
#if 0
|
||||
static int nll_lemma_count = 0;
|
||||
#endif
|
||||
#define SHOW_LEMMA_COUNT -1
|
||||
|
||||
// One half of a resolution. We need this to distinguish
|
||||
|
|
12
src/muz/duality/duality_dl_interface.cpp
Normal file → Executable file
|
@ -35,10 +35,15 @@ Revision History:
|
|||
#include "model_smt2_pp.h"
|
||||
#include "model_v2_pp.h"
|
||||
#include "fixedpoint_params.hpp"
|
||||
#include "scoped_proof.h"
|
||||
|
||||
// template class symbol_table<family_id>;
|
||||
|
||||
#ifdef WIN32
|
||||
#pragma warning(disable:4996)
|
||||
#pragma warning(disable:4800)
|
||||
#pragma warning(disable:4267)
|
||||
#pragma warning(disable:4101)
|
||||
#endif
|
||||
|
||||
#include "duality.h"
|
||||
#include "duality_profiling.h"
|
||||
|
@ -213,6 +218,9 @@ lbool dl_interface::query(::expr * query) {
|
|||
catch (Duality::solver::cancel_exception &exn){
|
||||
throw default_exception("duality canceled");
|
||||
}
|
||||
catch (Duality::Solver::Incompleteness &exn){
|
||||
throw default_exception("incompleteness");
|
||||
}
|
||||
|
||||
// profile!
|
||||
|
||||
|
@ -472,7 +480,7 @@ static proof_ref extract_proof(dl_interface *d, Solver::Counterexample &cex) {
|
|||
expr conc = f(args);
|
||||
|
||||
|
||||
::vector<proof *> pprems;
|
||||
::vector< ::proof *> pprems;
|
||||
for(unsigned i = 0; i < prems.size(); i++)
|
||||
pprems.push_back(prems[i].get());
|
||||
|
||||
|
|
|
@ -527,6 +527,9 @@ namespace datalog {
|
|||
|
||||
|
||||
bool mk_rule_inliner::do_eager_inlining(rule * r, rule_set const& rules, rule_ref& res) {
|
||||
if (r->has_negation()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
SASSERT(rules.is_closed());
|
||||
const rule_stratifier& strat = rules.get_stratifier();
|
||||
|
|
|
@ -27,6 +27,7 @@ void qi_params::updt_params(params_ref const & _p) {
|
|||
m_mbqi_max_iterations = p.mbqi_max_iterations();
|
||||
m_mbqi_trace = p.mbqi_trace();
|
||||
m_mbqi_force_template = p.mbqi_force_template();
|
||||
m_mbqi_id = p.mbqi_id();
|
||||
m_qi_profile = p.qi_profile();
|
||||
m_qi_profile_freq = p.qi_profile_freq();
|
||||
m_qi_max_instances = p.qi_max_instances();
|
||||
|
|
|
@ -51,6 +51,7 @@ struct qi_params {
|
|||
unsigned m_mbqi_max_iterations;
|
||||
bool m_mbqi_trace;
|
||||
unsigned m_mbqi_force_template;
|
||||
const char * m_mbqi_id;
|
||||
|
||||
qi_params(params_ref const & p = params_ref()):
|
||||
/*
|
||||
|
@ -97,7 +98,9 @@ struct qi_params {
|
|||
m_mbqi_max_cexs_incr(1),
|
||||
m_mbqi_max_iterations(1000),
|
||||
m_mbqi_trace(false),
|
||||
m_mbqi_force_template(10) {
|
||||
m_mbqi_force_template(10),
|
||||
m_mbqi_id(0)
|
||||
{
|
||||
updt_params(p);
|
||||
}
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ def_module_params(module_name='smt',
|
|||
('mbqi.max_iterations', UINT, 1000, 'maximum number of rounds of MBQI'),
|
||||
('mbqi.trace', BOOL, False, 'generate tracing messages for Model Based Quantifier Instantiation (MBQI). It will display a message before every round of MBQI, and the quantifiers that were not satisfied'),
|
||||
('mbqi.force_template', UINT, 10, 'some quantifiers can be used as templates for building interpretations for functions. Z3 uses heuristics to decide whether a quantifier will be used as a template or not. Quantifiers with weight >= mbqi.force_template are forced to be used as a template'),
|
||||
('mbqi.id', STRING, '', 'Only use model-based instantiation for quantifiers with id\'s beginning with string'),
|
||||
('qi.profile', BOOL, False, 'profile quantifier instantiation'),
|
||||
('qi.profile_freq', UINT, UINT_MAX, 'how frequent results are reported by qi.profile'),
|
||||
('qi.max_instances', UINT, UINT_MAX, 'maximum number of quantifier instantiations'),
|
||||
|
|
|
@ -759,7 +759,8 @@ namespace smt {
|
|||
app * fact = to_app(m_manager.get_fact(pr));
|
||||
app * n1_owner = n1->get_owner();
|
||||
app * n2_owner = n2->get_owner();
|
||||
if (fact->get_num_args() != 2 || (fact->get_arg(0) != n2_owner && fact->get_arg(1) != n2_owner)) {
|
||||
bool is_eq = m_manager.is_eq(fact) || m_manager.is_iff(fact);
|
||||
if (!is_eq || (fact->get_arg(0) != n2_owner && fact->get_arg(1) != n2_owner)) {
|
||||
CTRACE("norm_eq_proof_bug", !m_ctx.is_true(n2) && !m_ctx.is_false(n2),
|
||||
tout << "n1: #" << n1->get_owner_id() << ", n2: #" << n2->get_owner_id() << "\n";
|
||||
if (fact->get_num_args() == 2) {
|
||||
|
|
|
@ -322,6 +322,7 @@ namespace smt {
|
|||
|
||||
for (; it != end; ++it) {
|
||||
quantifier * q = *it;
|
||||
if(!m_qm->mbqi_enabled(q)) continue;
|
||||
if (m_context->is_relevant(q) && m_context->get_assignment(q) == l_true) {
|
||||
if (m_params.m_mbqi_trace && q->get_qid() != symbol::null) {
|
||||
verbose_stream() << "(smt.mbqi :checking " << q->get_qid() << ")\n";
|
||||
|
|
|
@ -335,6 +335,10 @@ namespace smt {
|
|||
return m_imp->m_plugin->model_based();
|
||||
}
|
||||
|
||||
bool quantifier_manager::mbqi_enabled(quantifier *q) const {
|
||||
return m_imp->m_plugin->mbqi_enabled(q);
|
||||
}
|
||||
|
||||
void quantifier_manager::adjust_model(proto_model * m) {
|
||||
m_imp->m_plugin->adjust_model(m);
|
||||
}
|
||||
|
@ -434,10 +438,24 @@ namespace smt {
|
|||
|
||||
virtual bool model_based() const { return m_fparams->m_mbqi; }
|
||||
|
||||
virtual bool mbqi_enabled(quantifier *q) const {
|
||||
if(!m_fparams->m_mbqi_id) return true;
|
||||
const symbol &s = q->get_qid();
|
||||
unsigned len = strlen(m_fparams->m_mbqi_id);
|
||||
if(s == symbol::null || s.is_numerical())
|
||||
return len == 0;
|
||||
return strncmp(s.bare_str(),m_fparams->m_mbqi_id,len) == 0;
|
||||
}
|
||||
|
||||
/* Quantifier id's must begin with the prefix specified by
|
||||
parameter mbqi.id to be instantiated with MBQI. The default
|
||||
value is the empty string, so all quantifiers are
|
||||
instantiated.
|
||||
*/
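/* Illustrative usage (assuming the usual smt.* option prefix):
       (set-option :smt.mbqi.id "inv_")
       (assert (forall ((x Int)) (! (p x) :qid inv_p)))
   restricts MBQI to quantifiers whose qid begins with "inv_"; quantifiers with numerical
   or missing id's are then skipped. */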
|
||||
virtual void add(quantifier * q) {
|
||||
if (m_fparams->m_mbqi) {
|
||||
m_model_finder->register_quantifier(q);
|
||||
}
|
||||
if (m_fparams->m_mbqi && mbqi_enabled(q)) {
|
||||
m_model_finder->register_quantifier(q);
|
||||
}
|
||||
}
|
||||
|
||||
virtual void del(quantifier * q) {
|
||||
|
|
|
@ -75,6 +75,7 @@ namespace smt {
|
|||
};
|
||||
|
||||
bool model_based() const;
|
||||
bool mbqi_enabled(quantifier *q) const; // can mbqi instantiate this quantifier?
|
||||
void adjust_model(proto_model * m);
|
||||
check_model_result check_model(proto_model * m, obj_map<enode, app *> const & root2value);
|
||||
|
||||
|
@ -144,6 +145,11 @@ namespace smt {
|
|||
*/
|
||||
virtual bool model_based() const = 0;
|
||||
|
||||
/**
|
||||
\brief Is "model based" instantiation allowed to instantiate this quantifier?
|
||||
*/
|
||||
virtual bool mbqi_enabled(quantifier *q) const {return true;}
|
||||
|
||||
/**
|
||||
\brief Give the plugin a chance to adjust the interpretation of uninterpreted functions.
|
||||
It can basically change the "else" of each uninterpreted function.
|
||||
|
|
|
@ -475,10 +475,11 @@ namespace smt {
|
|||
bool theory_arith<Ext>::all_coeff_int(row const & r) const {
|
||||
typename vector<row_entry>::const_iterator it = r.begin_entries();
|
||||
typename vector<row_entry>::const_iterator end = r.end_entries();
|
||||
for (; it != end; ++it) {
|
||||
if (!it->is_dead() && !it->m_coeff.is_int())
|
||||
for (; it != end; ++it) {
|
||||
if (!it->is_dead() && !it->m_coeff.is_int())
|
||||
TRACE("gomory_cut", display_row(tout, r, true););
|
||||
return false;
|
||||
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -71,6 +71,7 @@ class lia2pb_tactic : public tactic {
|
|||
if (m_bm.has_lower(n, l, s) &&
|
||||
m_bm.has_upper(n, u, s) &&
|
||||
l.is_zero() &&
|
||||
!u.is_neg() &&
|
||||
u.get_num_bits() <= m_max_bits) {
|
||||
|
||||
return true;
|
||||
|
|
|
@ -1368,12 +1368,12 @@ void fpa2bv_converter::mk_fusedma(func_decl * f, unsigned num, expr * const * ar
|
|||
not_e_sgn = m_bv_util.mk_bv_not(e_sgn);
|
||||
not_f_sgn = m_bv_util.mk_bv_not(f_sgn);
|
||||
not_sign_bv = m_bv_util.mk_bv_not(sign_bv);
|
||||
res_sgn_c1 = m.mk_app(bvfid, OP_BAND, not_e_sgn, e_sgn, sign_bv);
|
||||
res_sgn_c1 = m.mk_app(bvfid, OP_BAND, not_e_sgn, f_sgn, sign_bv);
|
||||
res_sgn_c2 = m.mk_app(bvfid, OP_BAND, e_sgn, not_f_sgn, not_sign_bv);
|
||||
res_sgn_c3 = m.mk_app(bvfid, OP_BAND, e_sgn, f_sgn);
|
||||
expr * res_sgn_or_args[3] = { res_sgn_c1, res_sgn_c2, res_sgn_c3 };
|
||||
res_sgn = m_bv_util.mk_bv_or(3, res_sgn_or_args);
|
||||
|
||||
|
||||
sticky_raw = m_bv_util.mk_extract(sbits-5, 0, sig_abs);
|
||||
sticky = m_bv_util.mk_zero_extend(sbits+3, m.mk_app(bvfid, OP_BREDOR, sticky_raw.get()));
|
||||
dbg_decouple("fpa2bv_fma_add_sum_sticky", sticky);
|
||||
|
@ -1836,6 +1836,21 @@ void fpa2bv_converter::mk_to_float(func_decl * f, unsigned num, expr * const * a
|
|||
// Just keep it here, as there will be something else that uses it.
|
||||
mk_triple(args[0], args[1], args[2], result);
|
||||
}
|
||||
else if (num == 1 && m_bv_util.is_bv(args[0])) {
|
||||
sort * s = f->get_range();
|
||||
unsigned to_sbits = m_util.get_sbits(s);
|
||||
unsigned to_ebits = m_util.get_ebits(s);
|
||||
|
||||
expr * bv = args[0];
|
||||
int sz = m_bv_util.get_bv_size(bv);
|
||||
SASSERT((unsigned)sz == to_sbits + to_ebits);
|
||||
|
||||
m_bv_util.mk_extract(sz - 1, sz - 1, bv);
|
||||
mk_triple(m_bv_util.mk_extract(sz - 1, sz - 1, bv),
|
||||
m_bv_util.mk_extract(sz - to_ebits - 2, 0, bv),
|
||||
m_bv_util.mk_extract(sz - 2, sz - to_ebits - 1, bv),
|
||||
result);
|
||||
}
|
||||
else if (num == 2 && is_app(args[1]) && m_util.is_float(m.get_sort(args[1]))) {
|
||||
// We also support float to float conversion
|
||||
sort * s = f->get_range();
|
||||
|
@ -2043,6 +2058,27 @@ void fpa2bv_converter::mk_to_ieee_bv(func_decl * f, unsigned num, expr * const *
|
|||
result = m_bv_util.mk_concat(m_bv_util.mk_concat(sgn, e), s);
|
||||
}
|
||||
|
||||
void fpa2bv_converter::mk_fp(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
|
||||
SASSERT(num == 3);
|
||||
mk_triple(args[0], args[2], args[1], result);
|
||||
}
|
||||
|
||||
void fpa2bv_converter::mk_to_fp_unsigned(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
|
||||
NOT_IMPLEMENTED_YET();
|
||||
}
|
||||
|
||||
void fpa2bv_converter::mk_to_ubv(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
|
||||
NOT_IMPLEMENTED_YET();
|
||||
}
|
||||
|
||||
void fpa2bv_converter::mk_to_sbv(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
|
||||
NOT_IMPLEMENTED_YET();
|
||||
}
|
||||
|
||||
void fpa2bv_converter::mk_to_real(func_decl * f, unsigned num, expr * const * args, expr_ref & result) {
|
||||
NOT_IMPLEMENTED_YET();
|
||||
}
|
||||
|
||||
void fpa2bv_converter::split(expr * e, expr * & sgn, expr * & sig, expr * & exp) const {
|
||||
SASSERT(is_app_of(e, m_plugin->get_family_id(), OP_TO_FLOAT));
|
||||
SASSERT(to_app(e)->get_num_args() == 3);
|
||||
|
|
|
@ -122,7 +122,13 @@ public:
|
|||
void mk_is_subnormal(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
|
||||
|
||||
void mk_to_float(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
|
||||
void mk_to_ieee_bv(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
|
||||
void mk_to_ieee_bv(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
|
||||
|
||||
void mk_fp(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
|
||||
void mk_to_fp_unsigned(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
|
||||
void mk_to_ubv(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
|
||||
void mk_to_sbv(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
|
||||
void mk_to_real(func_decl * f, unsigned num, expr * const * args, expr_ref & result);
|
||||
|
||||
obj_map<func_decl, expr*> const & const2bv() const { return m_const2bv; }
|
||||
obj_map<func_decl, expr*> const & rm_const2bv() const { return m_rm_const2bv; }
|
||||
|
|
|
@ -139,6 +139,11 @@ struct fpa2bv_rewriter_cfg : public default_rewriter_cfg {
|
|||
case OP_FLOAT_IS_SIGN_MINUS: m_conv.mk_is_sign_minus(f, num, args, result); return BR_DONE;
|
||||
case OP_TO_FLOAT: m_conv.mk_to_float(f, num, args, result); return BR_DONE;
|
||||
case OP_TO_IEEE_BV: m_conv.mk_to_ieee_bv(f, num, args, result); return BR_DONE;
|
||||
case OP_FLOAT_FP: m_conv.mk_fp(f, num, args, result); return BR_DONE;
|
||||
case OP_FLOAT_TO_FP_UNSIGNED: m_conv.mk_to_fp_unsigned(f, num, args, result); return BR_DONE;
|
||||
case OP_FLOAT_TO_UBV: m_conv.mk_to_ubv(f, num, args, result); return BR_DONE;
|
||||
case OP_FLOAT_TO_SBV: m_conv.mk_to_sbv(f, num, args, result); return BR_DONE;
|
||||
case OP_FLOAT_TO_REAL: m_conv.mk_to_real(f, num, args, result); return BR_DONE;
|
||||
default:
|
||||
TRACE("fpa2bv", tout << "unsupported operator: " << f->get_name() << "\n";
|
||||
for (unsigned i = 0; i < num; i++) tout << mk_ismt2_pp(args[i], m()) << std::endl;);
|
||||
|
|
|
@ -36,3 +36,81 @@ tactic * mk_qffpa_tactic(ast_manager & m, params_ref const & p) {
|
|||
mk_sat_tactic(m, p),
|
||||
mk_fail_if_undecided_tactic());
|
||||
}
|
||||
|
||||
struct is_non_qffpa_predicate {
|
||||
struct found {};
|
||||
ast_manager & m;
|
||||
float_util u;
|
||||
|
||||
is_non_qffpa_predicate(ast_manager & _m) : m(_m), u(m) {}
|
||||
|
||||
void operator()(var *) { throw found(); }
|
||||
|
||||
void operator()(quantifier *) { throw found(); }
|
||||
|
||||
void operator()(app * n) {
|
||||
sort * s = get_sort(n);
|
||||
if (!m.is_bool(s) && !u.is_float(s) && !u.is_rm(s))
|
||||
throw found();
|
||||
family_id fid = n->get_family_id();
|
||||
if (fid == m.get_basic_family_id())
|
||||
return;
|
||||
if (fid == u.get_family_id())
|
||||
return;
|
||||
if (is_uninterp_const(n))
|
||||
return;
|
||||
|
||||
throw found();
|
||||
}
|
||||
};
|
||||
|
||||
struct is_non_qffpabv_predicate {
|
||||
struct found {};
|
||||
ast_manager & m;
|
||||
bv_util bu;
|
||||
float_util fu;
|
||||
|
||||
is_non_qffpabv_predicate(ast_manager & _m) : m(_m), bu(m), fu(m) {}
|
||||
|
||||
void operator()(var *) { throw found(); }
|
||||
|
||||
void operator()(quantifier *) { throw found(); }
|
||||
|
||||
void operator()(app * n) {
|
||||
sort * s = get_sort(n);
|
||||
if (!m.is_bool(s) && !fu.is_float(s) && !fu.is_rm(s) && !bu.is_bv_sort(s))
|
||||
throw found();
|
||||
family_id fid = n->get_family_id();
|
||||
if (fid == m.get_basic_family_id())
|
||||
return;
|
||||
if (fid == fu.get_family_id() || fid == bu.get_family_id())
|
||||
return;
|
||||
if (is_uninterp_const(n))
|
||||
return;
|
||||
|
||||
throw found();
|
||||
}
|
||||
};
|
||||
|
||||
class is_qffpa_probe : public probe {
|
||||
public:
|
||||
virtual result operator()(goal const & g) {
|
||||
return !test<is_non_qffpa_predicate>(g);
|
||||
}
|
||||
};
|
||||
|
||||
class is_qffpabv_probe : public probe {
|
||||
public:
|
||||
virtual result operator()(goal const & g) {
|
||||
return !test<is_non_qffpabv_predicate>(g);
|
||||
}
|
||||
};
|
||||
|
||||
probe * mk_is_qffpa_probe() {
|
||||
return alloc(is_qffpa_probe);
|
||||
}
|
||||
|
||||
probe * mk_is_qffpabv_probe() {
|
||||
return alloc(is_qffpabv_probe);
|
||||
}
|
||||
|
|
@ -30,4 +30,11 @@ tactic * mk_qffpa_tactic(ast_manager & m, params_ref const & p = params_ref());
|
|||
ADD_TACTIC("qffpabv", "(try to) solve goal using the tactic for QF_FPABV (floats+bit-vectors).", "mk_qffpa_tactic(m, p)")
|
||||
*/
|
||||
|
||||
probe * mk_is_qffpa_probe();
|
||||
probe * mk_is_qffpabv_probe();
|
||||
/*
|
||||
ADD_PROBE("is-qffpa", "true if the goal is in QF_FPA (FloatingPoints).", "mk_is_qffpa_probe()")
|
||||
ADD_PROBE("is-qffpabv", "true if the goal is in QF_FPABV (FloatingPoints+Bitvectors).", "mk_is_qffpabv_probe()")
|
||||
*/
|
||||
|
||||
#endif
|
||||
|
|
|
@ -27,6 +27,7 @@ Notes:
|
|||
#include"nra_tactic.h"
|
||||
#include"probe_arith.h"
|
||||
#include"quant_tactics.h"
|
||||
#include"qffpa_tactic.h"
|
||||
|
||||
tactic * mk_default_tactic(ast_manager & m, params_ref const & p) {
|
||||
tactic * st = using_params(and_then(mk_simplify_tactic(m),
|
||||
|
@ -37,7 +38,8 @@ tactic * mk_default_tactic(ast_manager & m, params_ref const & p) {
|
|||
cond(mk_is_qfnia_probe(), mk_qfnia_tactic(m),
|
||||
cond(mk_is_nra_probe(), mk_nra_tactic(m),
|
||||
cond(mk_is_lira_probe(), mk_lira_tactic(m, p),
|
||||
mk_smt_tactic())))))))),
|
||||
cond(mk_is_qffpabv_probe(), mk_qffpa_tactic(m, p),
|
||||
mk_smt_tactic()))))))))),
|
||||
p);
|
||||
return st;
|
||||
}
|
||||
|
|
|
@ -49,8 +49,11 @@ Revision History:
|
|||
// clear to the compiler what instructions should be used. E.g., for sqrt(), the Windows compiler selects
|
||||
// the x87 FPU, even when /arch:SSE2 is on.
|
||||
// Luckily, these are kind of standardized, at least for Windows/Linux/OSX.
|
||||
#ifdef __clang__
|
||||
#undef USE_INTRINSICS
|
||||
#else
|
||||
#include <emmintrin.h>
|
||||
|
||||
#endif
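// Illustrative sketch only: with <emmintrin.h> included, a square root can be forced onto
// the SSE2 unit with something like
//     double r = _mm_cvtsd_f64(_mm_sqrt_sd(_mm_set_sd(0.0), _mm_set_sd(x)));
// instead of leaving the x87-vs-SSE choice to the compiler.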
|
||||
|
||||
hwf_manager::hwf_manager() :
|
||||
m_mpz_manager(m_mpq_manager)
|
||||
|
|
|
@ -57,6 +57,11 @@ public:
|
|||
m_free_ids.finalize();
|
||||
}
|
||||
|
||||
unsigned show_hash(){
|
||||
unsigned h = string_hash((char *)&m_free_ids[0],m_free_ids.size()*sizeof(unsigned),17);
|
||||
return hash_u_u(h,m_next_id);
|
||||
}
|
||||
|
||||
/**
|
||||
\brief Return N if the range of ids generated by this module is in the set [0..N)
|
||||
*/
|
||||
|
|
|
@ -1400,6 +1400,10 @@ mpf_exp_t mpf_manager::mk_max_exp(unsigned ebits) {
|
|||
return m_mpz_manager.get_int64(m_powers2.m1(ebits-1, false));
|
||||
}
|
||||
|
||||
mpf_exp_t mpf_manager::unbias_exp(unsigned ebits, mpf_exp_t biased_exponent) {
|
||||
return biased_exponent - m_mpz_manager.get_int64(m_powers2.m1(ebits - 1, false));
|
||||
}
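// Assuming m1(k) denotes 2^k - 1, the bias for ebits exponent bits is 2^(ebits-1) - 1,
// e.g. 127 for ebits = 8, so a biased (stored) exponent of 128 unbiases to 1.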
|
||||
|
||||
void mpf_manager::mk_nzero(unsigned ebits, unsigned sbits, mpf & o) {
|
||||
o.sbits = sbits;
|
||||
o.ebits = ebits;
|
||||
|
|
|
@ -182,6 +182,8 @@ public:
|
|||
mpf_exp_t mk_max_exp(unsigned ebits);
|
||||
mpf_exp_t mk_min_exp(unsigned ebits);
|
||||
|
||||
mpf_exp_t unbias_exp(unsigned ebits, mpf_exp_t biased_exponent);
|
||||
|
||||
/**
|
||||
\brief Return the biggest k s.t. 2^k <= a.
|
||||
|
||||
|
|
|
@ -104,7 +104,7 @@ public:
|
|||
}
|
||||
|
||||
bool is_int32() const {
|
||||
if (is_small()) return true;
|
||||
if (is_small() && is_int()) return true;
|
||||
// we don't assume that if it is small, then it is int32.
|
||||
if (!is_int64()) return false;
|
||||
int64 v = get_int64();
|
||||
|
|