diff --git a/src/muz_qe/dl_compiler.cpp b/src/muz_qe/dl_compiler.cpp
index 681dc696a..96ffcc148 100644
--- a/src/muz_qe/dl_compiler.cpp
+++ b/src/muz_qe/dl_compiler.cpp
@@ -707,6 +707,7 @@ namespace datalog {
 
                 //update the head relation
                 make_union(new_head_reg, head_reg, delta_reg, use_widening, acc);
+                make_dealloc_non_void(new_head_reg, acc);
             }
 
         finish:
diff --git a/src/muz_qe/dl_context.cpp b/src/muz_qe/dl_context.cpp
index 70250ca48..0952d03d4 100644
--- a/src/muz_qe/dl_context.cpp
+++ b/src/muz_qe/dl_context.cpp
@@ -756,6 +756,10 @@ namespace datalog {
         add_fact(head->get_decl(), fact);
     }
 
+    bool context::has_facts(func_decl * pred) const {
+        return m_rel && m_rel->has_facts(pred);
+    }
+
     void context::add_table_fact(func_decl * pred, const table_fact & fact) {
         if (get_engine() == DATALOG_ENGINE) {
             ensure_rel();
diff --git a/src/muz_qe/dl_context.h b/src/muz_qe/dl_context.h
index 62f1b83e1..74449940c 100644
--- a/src/muz_qe/dl_context.h
+++ b/src/muz_qe/dl_context.h
@@ -257,6 +257,7 @@ namespace datalog {
 
         void add_fact(app * head);
         void add_fact(func_decl * pred, const relation_fact & fact);
+        bool has_facts(func_decl * pred) const;
 
         void add_rule(rule_ref& r);
 
diff --git a/src/muz_qe/dl_mk_magic_sets.cpp b/src/muz_qe/dl_mk_magic_sets.cpp
index 27846410c..54ffdc805 100644
--- a/src/muz_qe/dl_mk_magic_sets.cpp
+++ b/src/muz_qe/dl_mk_magic_sets.cpp
@@ -309,6 +309,8 @@ namespace datalog {
         if (!m_context.magic_sets_for_queries()) {
             return 0;
         }
+        SASSERT(source.contains(m_goal));
+        SASSERT(source.get_predicate_rules(m_goal).size() == 1);
 
         app * goal_head = source.get_predicate_rules(m_goal)[0]->get_head();
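==== review note (illustrative sketch; not part of the patch) ====
The new context::has_facts guards the delegation with `m_rel &&` because the relational engine is created lazily and may still be null, and the new SASSERTs in dl_mk_magic_sets.cpp document the preconditions for indexing get_predicate_rules(m_goal)[0]. A minimal sketch of the null-safe delegation pattern; rel_engine and engine_facade are hypothetical stand-ins, not z3 names:

#include <cassert>
#include <vector>

struct rel_engine {
    std::vector<int> facts;
    bool has_facts() const { return !facts.empty(); }
};

struct engine_facade {
    rel_engine* m_rel = nullptr; // created lazily, may still be null

    // Mirrors: return m_rel && m_rel->has_facts(pred);
    bool has_facts() const { return m_rel && m_rel->has_facts(); }
};

int main() {
    engine_facade f;
    assert(!f.has_facts());   // safe even though m_rel is null
    rel_engine e;
    e.facts.push_back(42);
    f.m_rel = &e;
    assert(f.has_facts());
    return 0;
}
==== end review note ====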
diff --git a/src/muz_qe/dl_mk_similarity_compressor.cpp b/src/muz_qe/dl_mk_similarity_compressor.cpp
index 0e101eb68..969f1444a 100644
--- a/src/muz_qe/dl_mk_similarity_compressor.cpp
+++ b/src/muz_qe/dl_mk_similarity_compressor.cpp
@@ -42,20 +42,20 @@ namespace datalog {
 
     /**
        Allows to traverse head and positive tails in a single for loop starting from -1
     */
-    app * get_by_tail_index(rule * r, int idx) {
-        if (idx == -1) {
+    static app * get_by_tail_index(rule * r, int idx) {
+        if (idx < 0) {
             return r->get_head();
         }
-        SASSERT(idx<static_cast<int>(r->get_positive_tail_size()));
+        SASSERT(idx < static_cast<int>(r->get_positive_tail_size()));
         return r->get_tail(idx);
     }
 
     template<typename T>
-    int aux_compare(T a, T b) {
+    static int aux_compare(T a, T b) {
         return (a>b) ? 1 : ( (a==b) ? 0 : -1);
     }
 
-    int compare_var_args(app* t1, app* t2) {
+    static int compare_var_args(app* t1, app* t2) {
         SASSERT(t1->get_num_args()==t2->get_num_args());
         int res;
         unsigned n = t1->get_num_args();
@@ -76,7 +76,7 @@ namespace datalog {
         return 0;
     }
 
-    int compare_args(app* t1, app* t2, int & skip_countdown) {
+    static int compare_args(app* t1, app* t2, int & skip_countdown) {
         SASSERT(t1->get_num_args()==t2->get_num_args());
         int res;
         unsigned n = t1->get_num_args();
@@ -101,7 +101,7 @@ namespace datalog {
        Two rules are in the same rough similarity class if they differ only in constant arguments
       of positive uninterpreted predicates.
     */
-    int rough_compare(rule * r1, rule * r2) {
+    static int rough_compare(rule * r1, rule * r2) {
         int res = aux_compare(r1->get_tail_size(), r2->get_tail_size());
         if (res!=0) { return res; }
         res = aux_compare(r1->get_uninterpreted_tail_size(), r2->get_uninterpreted_tail_size());
@@ -132,7 +132,7 @@ namespace datalog {
        \c r1 and \c r2 must be equal according to the \c rough_compare function for this function
       to be called.
     */
-    int total_compare(rule * r1, rule * r2, int skipped_arg_index = INT_MAX) {
+    static int total_compare(rule * r1, rule * r2, int skipped_arg_index = INT_MAX) {
         SASSERT(rough_compare(r1, r2)==0);
         int pos_tail_sz = r1->get_positive_tail_size();
         for (int i=-1; i<pos_tail_sz; i++) {
@@ -157,7 +157,7 @@ namespace datalog {
 
     typedef svector<const_info> info_vector;
 
-    void collect_const_indexes(app * t, int tail_index, info_vector & res) {
+    static void collect_const_indexes(app * t, int tail_index, info_vector & res) {
         unsigned n = t->get_num_args();
         for (unsigned i=0; i<n; i++) {
             if (is_var(t->get_arg(i))) {
@@ -178,7 +178,7 @@ namespace datalog {
         }
     }
 
-    void collect_const_indexes(rule * r, info_vector & res) {
+    static void collect_const_indexes(rule * r, info_vector & res) {
         collect_const_indexes(r->get_head(), -1, res);
         unsigned pos_tail_sz = r->get_positive_tail_size();
         for (unsigned i=0; i<pos_tail_sz; i++) {
@@ -188,7 +188,7 @@ namespace datalog {
     }
 
     template<typename T>
-    void collect_orphan_consts(rule * r, const info_vector & const_infos, T & tgt) {
+    static void collect_orphan_consts(rule * r, const info_vector & const_infos, T & tgt) {
         unsigned const_cnt = const_infos.size();
         tgt.reset();
         for (unsigned i=0; i<const_cnt; i++) {
@@ -201,7 +201,7 @@ namespace datalog {
     }
 
     template<typename T>
-    void collect_orphan_sorts(rule * r, const info_vector & const_infos, T & tgt) {
+    static void collect_orphan_sorts(rule * r, const info_vector & const_infos, T & tgt) {
         unsigned const_cnt = const_infos.size();
         tgt.reset();
         for (unsigned i=0; i<const_cnt; i++) {
@@ -231,7 +231,7 @@ namespace datalog {
-    void remove_stable_constants(rule_vector::iterator first, rule_vector::iterator after_last,
+    static void remove_stable_constants(rule_vector::iterator first, rule_vector::iterator after_last,
             info_vector & const_infos) {
         SASSERT(after_last-first>1);
         unsigned const_cnt = const_infos.size();
@@ -255,7 +255,7 @@ namespace datalog {
        first constant that is equal to it in all the rules. If there is no such, it will contain
       its own index.
     */
-    void detect_equal_constants(rule_vector::iterator first, rule_vector::iterator after_last,
+    static void detect_equal_constants(rule_vector::iterator first, rule_vector::iterator after_last,
             info_vector & const_infos) {
         SASSERT(first!=after_last);
         unsigned const_cnt = const_infos.size();
@@ -305,7 +305,7 @@ namespace datalog {
         }
     }
 
-    unsigned get_constant_count(rule * r) {
+    static unsigned get_constant_count(rule * r) {
         unsigned res = r->get_head()->get_num_args() - count_variable_arguments(r->get_head());
         unsigned pos_tail_sz = r->get_positive_tail_size();
         for (unsigned i=0; i<pos_tail_sz; i++) {
@@ -318,7 +318,7 @@ namespace datalog {
-    bool initial_comparator(rule * r1, rule * r2) {
+    static bool initial_comparator(rule * r1, rule * r2) {
         int res = rough_compare(r1, r2);
         if (res!=0) { return res>0; }
         return total_compare(r1, r2)>0;
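==== review note (illustrative sketch; not part of the patch) ====
The similarity compressor's helpers are three-way comparators chained lexicographically; marking them static gives them internal linkage, so they cannot collide with identically named helpers in other translation units. A small self-contained sketch of the idiom, using a hypothetical rule_stub in place of z3's rule:

#include <cassert>

// Same shape as aux_compare in the patch: -1/0/1 three-way comparison.
template<typename T>
static int aux_compare(T a, T b) {
    return (a > b) ? 1 : ((a == b) ? 0 : -1);
}

struct rule_stub { unsigned tail_size; unsigned uninterpreted_tail_size; };

// Chain comparisons: the first non-zero result decides the order.
static int rough_compare_stub(const rule_stub& r1, const rule_stub& r2) {
    int res = aux_compare(r1.tail_size, r2.tail_size);
    if (res != 0) return res;
    return aux_compare(r1.uninterpreted_tail_size, r2.uninterpreted_tail_size);
}

int main() {
    rule_stub a{2, 1}, b{2, 3};
    assert(rough_compare_stub(a, b) < 0);  // equal sizes, decided by second key
    assert(rough_compare_stub(a, a) == 0);
    return 0;
}
==== end review note ====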
diff --git a/src/muz_qe/dl_sparse_table.cpp b/src/muz_qe/dl_sparse_table.cpp
index b912ef136..47f518ee2 100644
--- a/src/muz_qe/dl_sparse_table.cpp
+++ b/src/muz_qe/dl_sparse_table.cpp
@@ -33,7 +33,7 @@ namespace datalog {
     entry_storage::store_offset entry_storage::insert_or_get_reserve_content() {
         SASSERT(has_reserve());
         store_offset entry_ofs = m_data_indexer.insert_if_not_there(m_reserve);
-        if(m_reserve==entry_ofs) {
+        if (m_reserve == entry_ofs) {
             //entry inserted, so reserve is no longer a reserve
             m_reserve = NO_RESERVE;
         }
@@ -42,7 +42,7 @@ namespace datalog {
     bool entry_storage::insert_reserve_content() {
         SASSERT(has_reserve());
         store_offset entry_ofs = m_data_indexer.insert_if_not_there(m_reserve);
-        if(m_reserve==entry_ofs) {
+        if (m_reserve == entry_ofs) {
             //entry inserted, so reserve is no longer a reserve
             m_reserve = NO_RESERVE;
             return true;
@@ -53,7 +53,7 @@ namespace datalog {
     bool entry_storage::remove_reserve_content() {
         SASSERT(has_reserve());
         store_offset entry_ofs;
-        if(!find_reserve_content(entry_ofs)) {
+        if (!find_reserve_content(entry_ofs)) {
             //the fact was not in the table
             return false;
         }
@@ -64,8 +64,8 @@ namespace datalog {
     void entry_storage::remove_offset(store_offset ofs) {
         m_data_indexer.remove(ofs);
         store_offset last_ofs = after_last_offset() - m_entry_size;
-        if(ofs!=last_ofs) {
-            SASSERT(ofs+m_entry_size<=last_ofs);
+        if (ofs!=last_ofs) {
+            SASSERT(ofs + m_entry_size <= last_ofs);
             //we don't want any holes, so we put the last element at the place
             //of the removed one
             m_data_indexer.remove(last_ofs);
@@ -73,7 +73,7 @@ namespace datalog {
             memcpy(base+ofs, base+last_ofs, m_entry_size);
             m_data_indexer.insert(ofs);
         }
-        if(has_reserve()) {
+        if (has_reserve()) {
             //we already have a reserve, so we need to shrink a little to keep having just one
             resize_data(m_data_size-m_entry_size);
         }
@@ -98,20 +98,20 @@ namespace datalog {
         unsigned length = 0;
         unsigned dom_size_sm;
-        if(dom_size>UINT_MAX) {
+        if (dom_size>UINT_MAX) {
             dom_size_sm = static_cast<unsigned>(dom_size>>32);
             length += 32;
-            if( (dom_size&UINT_MAX)!=0 && dom_size_sm!=UINT_MAX ) {
+            if ( (dom_size&UINT_MAX)!=0 && dom_size_sm!=UINT_MAX ) {
                 dom_size_sm++;
             }
         }
         else {
             dom_size_sm=static_cast<unsigned>(dom_size);
         }
-        if(dom_size_sm==1) {
+        if (dom_size_sm == 1) {
             length += 1; //unary domains
         }
-        else if(dom_size_sm>0x80000000u) {
+        else if (dom_size_sm > 0x80000000u) {
             length += 32;
         }
         else {
@@ -122,30 +122,30 @@ namespace datalog {
 
     sparse_table::column_layout::column_layout(const table_signature & sig)
             : m_functional_col_cnt(sig.functional_columns()) {
-        SASSERT(sig.size()>0);
+        SASSERT(sig.size() > 0);
         unsigned ofs = 0;
         unsigned sig_sz = sig.size();
         unsigned first_functional = sig_sz-m_functional_col_cnt;
-        for(unsigned i=0; i<sig_sz; i++) {
+        for (unsigned i=0; i<sig_sz; i++) {
             unsigned length = get_domain_length(sig[i]);
             SASSERT(length>0);
             SASSERT(length<=64);
-            if(size()>0 && (length>54 || i==first_functional)) {
+            if (size() > 0 && (length > 54 || i == first_functional)) {
                 //large domains must start byte-aligned, as well as functional columns
                 make_byte_aligned_end(size()-1);
                 ofs = back().next_ofs();
             }
             push_back(column_info(ofs, length));
-            ofs+=length;
+            ofs += length;
         }
         make_byte_aligned_end(size()-1);
-        SASSERT(back().next_ofs()%8==0);//the entries must be aligned to whole bytes
+        SASSERT(back().next_ofs()%8 == 0);//the entries must be aligned to whole bytes
         m_entry_size = back().next_ofs()/8;
-        if(m_functional_col_cnt) {
-            SASSERT((*this)[first_functional].m_offset%8==0);
+        if (m_functional_col_cnt) {
+            SASSERT((*this)[first_functional].m_offset%8 == 0);
             m_functional_part_size = m_entry_size - (*this)[first_functional].m_offset/8;
         }
         else {
@@ -156,9 +156,9 @@ namespace datalog {
     void sparse_table::column_layout::make_byte_aligned_end(unsigned col_index0) {
         unsigned ofs = (*this)[col_index0].next_ofs();
         unsigned ofs_bit_part = ofs%8;
-        unsigned rounded_ofs = (ofs_bit_part==0) ? ofs : (ofs+8-ofs_bit_part);
+        unsigned rounded_ofs = (ofs_bit_part == 0) ? ofs : (ofs+8-ofs_bit_part);
 
-        if(rounded_ofs!=ofs) {
+        if (rounded_ofs!=ofs) {
             SASSERT(rounded_ofs>ofs);
             int diff = rounded_ofs-ofs;
             unsigned col_idx = col_index0+1;
@@ -168,18 +168,18 @@ namespace datalog {
                 col_idx--;
                 column_info & ci = (*this)[col_idx];
                 unsigned new_length = ci.m_length;
-                if(ci.m_length<64) {
+                if (ci.m_length < 64) {
                     unsigned swallowed = std::min(64-static_cast<int>(ci.m_length), diff);
-                    diff-=swallowed;
-                    new_length+=swallowed;
+                    diff -= swallowed;
+                    new_length += swallowed;
                 }
                 unsigned new_ofs = ci.m_offset+diff;
                 ci = column_info(new_ofs, new_length);
             }
         }
-        SASSERT(rounded_ofs%8==0);
-        SASSERT((*this)[col_index0].next_ofs()%8==0);
+        SASSERT(rounded_ofs%8 == 0);
+        SASSERT((*this)[col_index0].next_ofs()%8 == 0);
     }
 
     // -----------------------------------
@@ -218,7 +218,7 @@ namespace datalog {
             m_layout(t.m_column_layout) {}
 
         virtual bool is_finished() const {
-            return m_ptr==m_end;
+            return m_ptr == m_end;
         }
 
         virtual row_interface & operator*() {
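==== review note (illustrative sketch; not part of the patch) ====
column_layout packs each column into the minimum number of bits for its domain size and rounds entry boundaries up to whole bytes, which is what make_byte_aligned_end enforces above. A rough sketch of both calculations under simplified assumptions (no functional-column special cases; function names are hypothetical):

#include <cassert>
#include <cstdint>

// A column over a domain of size N needs ceil(log2(N)) bits, at least 1.
static unsigned num_sort_bits_stub(uint64_t dom_size) {
    unsigned length = 0;
    while ((uint64_t(1) << length) < dom_size && length < 63)
        length++;
    return length == 0 ? 1 : length; // unary domains still occupy one bit
}

// Round a bit offset up to the next byte boundary.
static unsigned round_up_to_byte(unsigned bit_ofs) {
    return (bit_ofs + 7) & ~7u; // same result as ofs + 8 - ofs % 8 when misaligned
}

int main() {
    assert(num_sort_bits_stub(2) == 1);
    assert(num_sort_bits_stub(256) == 8);
    assert(num_sort_bits_stub(257) == 9);
    assert(round_up_to_byte(13) == 16);
    assert(round_up_to_byte(16) == 16);
    return 0;
}
==== end review note ====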
@@ -267,7 +267,7 @@ namespace datalog {
             offset_iterator begin() const { return m_singleton ? &m_single_result : m_many.begin; }
             offset_iterator end() const { return m_singleton ? (&m_single_result+1) : m_many.end; }
 
-            bool empty() const { return begin()==end(); }
+            bool empty() const { return begin() == end(); }
         };
 
         key_indexer(unsigned key_len, const unsigned * key_cols)
@@ -299,7 +299,7 @@ namespace datalog {
             key_to_reserve(key);
             store_offset ofs = m_keys.insert_or_get_reserve_content();
             index_map::entry * e = m_map.find_core(ofs);
-            if(!e) {
+            if (!e) {
                 TRACE("dl_table_relation", tout << "inserting\n";);
                 e = m_map.insert_if_not_there2(ofs, offset_vector());
             }
@@ -312,7 +312,7 @@ namespace datalog {
             m_first_nonindexed(0) {}
 
         virtual void update(const sparse_table & t) {
-            if(m_first_nonindexed==t.m_data.after_last_offset()) {
+            if (m_first_nonindexed == t.m_data.after_last_offset()) {
                 return;
             }
             SASSERT(m_first_nonindexed<t.m_data.after_last_offset());
@@ -330,7 +330,7 @@ namespace datalog {
-                if(!e) {
+                if (!e) {
                     e = m_map.insert_if_not_there2(ofs, offset_vector());
                 }
                 offset_vector & index_entry = e->get_data().m_value;
@@ -381,15 +381,15 @@ namespace datalog {
 
         static bool can_handle(unsigned key_len, const unsigned * key_cols, const sparse_table & t) {
             unsigned non_func_cols = t.get_signature().first_functional();
-            if(key_len!=non_func_cols) {
+            if (key_len!=non_func_cols) {
                 return false;
             }
             counter ctr;
             ctr.count(key_len, key_cols);
-            if(ctr.get_max_counter_value()!=1 || ctr.get_max_positive()!=non_func_cols-1) {
+            if (ctr.get_max_counter_value()!=1 || ctr.get_max_positive()!=non_func_cols-1) {
                 return false;
             }
-            SASSERT(ctr.get_positive_count()==non_func_cols);
+            SASSERT(ctr.get_positive_count() == non_func_cols);
             return true;
         }
 
@@ -399,7 +399,7 @@ namespace datalog {
             SASSERT(can_handle(key_len, key_cols, t));
 
             m_permutation.resize(key_len);
-            for(unsigned i=0; i<key_len; i++) {
+            for (unsigned i=0; i<key_len; i++) {
@@ -437,8 +437,8 @@ namespace datalog {
-        if(!key_map_entry->get_data().m_value) {
-            if(full_signature_key_indexer::can_handle(key_len, key_cols, *this)) {
+        if (!key_map_entry->get_data().m_value) {
+            if (full_signature_key_indexer::can_handle(key_len, key_cols, *this)) {
                 key_map_entry->get_data().m_value = alloc(full_signature_key_indexer, key_len, key_cols, *this);
             }
             else {
@@ -488,7 +488,7 @@ namespace datalog {
     void sparse_table::reset_indexes() {
         key_index_map::iterator kmit = m_key_indexes.begin();
         key_index_map::iterator kmend = m_key_indexes.end();
-        for(; kmit!=kmend; ++kmit) {
+        for (; kmit!=kmend; ++kmit) {
             dealloc((*kmit).m_value);
         }
         m_key_indexes.reset();
@@ -499,11 +499,8 @@ namespace datalog {
         m_data.ensure_reserve();
         char * reserve = m_data.get_reserve_ptr();
         unsigned col_cnt = m_column_layout.size();
-        for(unsigned i=0; i<col_cnt; i++) {
-            if (f[i] >= get_signature()[i]) {
-                std::cout << f[i] << " " << get_signature()[i] << "\n";
-            }
-            SASSERT(f[i]<get_signature()[i]);
+        for (unsigned i=0; i<col_cnt; i++) {
+            SASSERT(f[i] < get_signature()[i]);
             set_cell(reserve, i, f[i]);
         }
     }
@@ -515,17 +512,17 @@ namespace datalog {
         sparse_table & t = const_cast<sparse_table &>(*this);
         t.write_into_reserve(f.c_ptr());
 
         unsigned func_col_cnt = get_signature().functional_columns();
-        if(func_col_cnt==0) {
+        if (func_col_cnt == 0) {
             return t.m_data.reserve_content_already_present();
         }
         else {
             store_offset ofs;
-            if(!t.m_data.find_reserve_content(ofs)) {
+            if (!t.m_data.find_reserve_content(ofs)) {
                 return false;
             }
             unsigned sz = get_signature().size();
-            for(unsigned i=func_col_cnt; i<sz; i++) {
+            for (unsigned i=func_col_cnt; i<sz; i++) {
@@ -541,11 +538,11 @@ namespace datalog {
         sparse_table & t = const_cast<sparse_table &>(*this);
         t.write_into_reserve(f.c_ptr());
 
         store_offset ofs;
-        if(!t.m_data.find_reserve_content(ofs)) {
+        if (!t.m_data.find_reserve_content(ofs)) {
             return false;
         }
         unsigned sz = sig.size();
-        for(unsigned i=sig.first_functional(); i<sz; i++) {
+        for (unsigned i=sig.first_functional(); i<sz; i++) {
@@ -749,10 +746,10 @@ namespace datalog {
             sp_table_vector * vect = it->m_value;
             sp_table_vector::iterator it = vect->begin();
             sp_table_vector::iterator end = vect->end();
-            for(; it!=end; ++it) {
+            for (; it!=end; ++it) {
                 (*it)->destroy(); //calling deallocate() would only put the table back into the pool
             }
             dealloc(vect);
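==== review note (illustrative sketch; not part of the patch) ====
The key_indexer machinery above maps the values of a set of key columns to the offsets of the matching rows, and update() only indexes rows added since the last call (m_first_nonindexed). A toy version of the idea with std::map standing in for z3's hashtables; all names here are hypothetical:

#include <cassert>
#include <map>
#include <vector>

typedef std::vector<unsigned> row;

struct key_index {
    std::vector<unsigned> key_cols;               // which columns form the key
    std::map<row, std::vector<size_t>> index;     // key values -> row offsets

    // Incremental update: only rows at or after first_nonindexed are indexed.
    void update(const std::vector<row>& table, size_t first_nonindexed) {
        for (size_t ofs = first_nonindexed; ofs < table.size(); ++ofs) {
            row key;
            for (unsigned c : key_cols) key.push_back(table[ofs][c]);
            index[key].push_back(ofs);
        }
    }
};

int main() {
    std::vector<row> table = {{1, 10}, {2, 20}, {1, 30}};
    key_index idx;
    idx.key_cols = {0};
    idx.update(table, 0);
    assert(idx.index[{1}].size() == 2); // rows 0 and 2 share key value 1
    return 0;
}
==== end review note ====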
@@ -767,7 +764,7 @@ namespace datalog {
         table_pool::entry * e = m_pool.insert_if_not_there2(sig, 0);
         sp_table_vector * & vect = e->get_data().m_value;
-        if(vect==0) {
+        if (vect == 0) {
             vect = alloc(sp_table_vector);
         }
         IF_VERBOSE(12, verbose_stream() << "Recycle: " << t->get_size_estimate_bytes() << "\n";);
@@ -779,7 +776,7 @@ namespace datalog {
         SASSERT(can_handle_signature(s));
 
         sp_table_vector * vect;
-        if(!m_pool.find(s, vect) || vect->empty()) {
+        if (!m_pool.find(s, vect) || vect->empty()) {
             return alloc(sparse_table, *this, s);
         }
         sparse_table * res = vect->back();
@@ -796,7 +793,7 @@ namespace datalog {
     bool sparse_table_plugin::join_involves_functional(const table_signature & s1, const table_signature & s2,
             unsigned col_cnt, const unsigned * cols1, const unsigned * cols2) {
-        if(col_cnt==0) {
+        if (col_cnt == 0) {
             return false;
         }
         return counter().count(col_cnt, cols1).get_max_positive()>=s1.first_functional()
@@ -827,7 +824,7 @@ namespace datalog {
             //do indexing into the bigger one. If we simply do a product, we want the bigger
             //one to be at the outer iteration (then the small one will hopefully fit into
             //the cache)
-            if( (t1.row_count() > t2.row_count()) == (!m_cols1.empty()) ) {
+            if ( (t1.row_count() > t2.row_count()) == (!m_cols1.empty()) ) {
                 sparse_table::self_agnostic_join_project(t2, t1, m_cols1.size(), m_cols2.c_ptr(),
                     m_cols1.c_ptr(), m_removed_cols.c_ptr(), true, *res);
             }
@@ -844,7 +841,7 @@ namespace datalog {
             unsigned col_cnt, const unsigned * cols1, const unsigned * cols2) {
         const table_signature & sig1 = t1.get_signature();
         const table_signature & sig2 = t2.get_signature();
-        if(t1.get_kind()!=get_kind() || t2.get_kind()!=get_kind()
+        if (t1.get_kind()!=get_kind() || t2.get_kind()!=get_kind()
                 || join_involves_functional(sig1, sig2, col_cnt, cols1, cols2)) {
             //We also don't allow indexes on functional columns (and they are needed for joins)
             return 0;
@@ -857,8 +854,8 @@ namespace datalog {
             const unsigned * removed_cols) {
         const table_signature & sig1 = t1.get_signature();
         const table_signature & sig2 = t2.get_signature();
-        if(t1.get_kind()!=get_kind() || t2.get_kind()!=get_kind()
-                || removed_col_cnt==t1.get_signature().size()+t2.get_signature().size()
+        if (t1.get_kind()!=get_kind() || t2.get_kind()!=get_kind()
+                || removed_col_cnt == t1.get_signature().size()+t2.get_signature().size()
                 || join_involves_functional(sig1, sig2, col_cnt, cols1, cols2)) {
             //We don't allow sparse tables with zero signatures (and project on all columns leads to such)
             //We also don't allow indexes on functional columns.
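==== review note (illustrative sketch; not part of the patch) ====
The recycle/mk_table pair above implements a per-signature object pool: a deallocated table is parked in m_pool and handed back on the next allocation with the same signature, avoiding repeated large allocations. A compact sketch of that pattern with simplified, hypothetical types:

#include <cassert>
#include <map>
#include <vector>

typedef std::vector<unsigned> signature;
struct table { signature sig; };

struct table_pool {
    std::map<signature, std::vector<table*>> m_pool;

    table* mk_table(const signature& s) {
        std::vector<table*>& vect = m_pool[s];
        if (vect.empty())
            return new table{s};   // pool miss: allocate a fresh table
        table* res = vect.back();  // pool hit: recycle an old one
        vect.pop_back();
        return res;
    }
    void recycle(table* t) { m_pool[t->sig].push_back(t); }
};

int main() {
    table_pool pool;
    table* a = pool.mk_table({8, 8});
    pool.recycle(a);
    table* b = pool.mk_table({8, 8});
    assert(a == b); // the same object came back out of the pool
    delete b;
    return 0;
}
==== end review note ====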
@@ -879,8 +876,8 @@ namespace datalog {
         unsigned fact_size = tgt.m_fact_size;
         const char* ptr = src.m_data.begin();
         const char* after_last=src.m_data.after_last();
-        for(; ptr<after_last; ptr+=fact_size) {
+        for (; ptr<after_last; ptr+=fact_size) {
             tgt.add_fact(ptr);
         }
     }
@@ -889,7 +886,7 @@ namespace datalog {
 
     table_union_fn * sparse_table_plugin::mk_union_fn(const table_base & tgt, const table_base & src,
             const table_base * delta) {
-        if(tgt.get_kind()!=get_kind() || src.get_kind()!=get_kind()
+        if (tgt.get_kind()!=get_kind() || src.get_kind()!=get_kind()
                 || (delta && delta->get_kind()!=get_kind())
                 || tgt.get_signature()!=src.get_signature()
                 || (delta && delta->get_signature()!=tgt.get_signature())) {
@@ -916,8 +913,8 @@ namespace datalog {
                 const sparse_table::column_layout & tgt_layout) {
             unsigned r_idx=0;
             unsigned tgt_i=0;
-            for(unsigned i=0; i<src_col_cnt; i++) {
-                if(i==m_removed_cols[r_idx]) {
+            for (unsigned i=0; i<src_col_cnt; i++) {
+                if (i == m_removed_cols[r_idx]) {
@@ -934,7 +931,7 @@ namespace datalog {
-            for(; t_ptr!=t_end; t_ptr+=t_fact_size) {
+            for (; t_ptr!=t_end; t_ptr+=t_fact_size) {
                 res->m_data.ensure_reserve();
                 char * res_ptr = res->m_data.get_reserve_ptr();
@@ -955,7 +952,7 @@ namespace datalog {
 
     table_transformer_fn * sparse_table_plugin::mk_project_fn(const table_base & t, unsigned col_cnt,
             const unsigned * removed_cols) {
-        if(col_cnt==t.get_signature().size()) {
+        if (col_cnt == t.get_signature().size()) {
             return 0;
         }
         return alloc(project_fn, t.get_signature(), col_cnt, removed_cols);
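==== review note (illustrative sketch; not part of the patch) ====
project_fn's row transformation walks the source columns and skips those listed in the sorted m_removed_cols array, advancing r_idx past each removed column. The same walk over plain vectors, as a runnable illustration (the real code additionally repacks the bit-level column layouts):

#include <cassert>
#include <vector>

static std::vector<unsigned> project_row(const std::vector<unsigned>& src,
                                         const std::vector<unsigned>& removed_cols) {
    std::vector<unsigned> tgt;
    unsigned r_idx = 0;
    for (unsigned i = 0; i < src.size(); i++) {
        if (r_idx < removed_cols.size() && i == removed_cols[r_idx]) {
            r_idx++;       // skip a removed column
            continue;
        }
        tgt.push_back(src[i]);
    }
    return tgt;
}

int main() {
    std::vector<unsigned> row = {7, 8, 9, 10};
    std::vector<unsigned> removed = {1, 3}; // must be sorted
    std::vector<unsigned> out = project_row(row, removed);
    assert((out == std::vector<unsigned>{7, 9}));
    return 0;
}
==== end review note ====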
@@ -984,14 +981,14 @@ namespace datalog {
 
             sparse_table::key_indexer & indexer = t.get_key_indexer(1, &m_col);
             sparse_table::key_indexer::query_result t_offsets = indexer.get_matching_offsets(m_key);
-            if(t_offsets.empty()) {
+            if (t_offsets.empty()) {
                 //no matches
                 return res;
             }
             sparse_table::key_indexer::offset_iterator ofs_it=t_offsets.begin();
             sparse_table::key_indexer::offset_iterator ofs_end=t_offsets.end();
 
-            for(; ofs_it!=ofs_end; ++ofs_it) {
+            for (; ofs_it!=ofs_end; ++ofs_it) {
                 sparse_table::store_offset t_ofs = *ofs_it;
                 const char * t_ptr = t.get_at_offset(t_ofs);
 
@@ -999,8 +996,8 @@ namespace datalog {
                 char * res_reserve = res->m_data.get_reserve_ptr();
 
                 unsigned res_i = 0;
-                for(unsigned i=0; i<t_cols; i++) {
-                    if(i==m_col) {
+                for (unsigned i=0; i<t_cols; i++) {
+                    if (i == m_col) {
@@ -1021,7 +1018,7 @@ namespace datalog {
-        if(t.get_kind()!=get_kind() || t.get_signature().size()==1 || col>=t.get_signature().first_functional()) {
+        if (t.get_kind()!=get_kind() || t.get_signature().size() == 1 || col>=t.get_signature().first_functional()) {
             //We don't allow sparse tables with zero signatures (and project on a single
             //column table produces one).
             //We also don't allow indexes on functional columns. And our implementation of
@@ -1034,11 +1031,11 @@ namespace datalog {
                 m_cycle_len(permutation_cycle_len), m_col_cnt(orig_sig.size()) {
             SASSERT(permutation_cycle_len>=2);
             idx_set cycle_cols;
-            for(unsigned i=0; i<m_cycle_len; i++) {
+            for (unsigned i=0; i<m_cycle_len; i++) {
@@ -1055,12 +1052,12 @@ namespace datalog {
             char* res_ptr = res->m_data.begin();
             char* res_end = res_ptr+res_data_size;
-            for(; res_ptr!=res_end; t_ptr+=t_fact_size, res_ptr+=res_fact_size) {
+            for (; res_ptr!=res_end; t_ptr+=t_fact_size, res_ptr+=res_fact_size) {
                 transform_row(t_ptr, res_ptr, t.m_column_layout, res->m_column_layout);
             }
 
             //and insert them into the hash-map
-            for(unsigned i=0; i!=res_data_size; i+=res_fact_size) {
+            for (unsigned i=0; i!=res_data_size; i+=res_fact_size) {
                 TRUSTME(res->m_data.insert_offset(i));
             }
 
@@ -1097,7 +1094,7 @@ namespace datalog {
 
     table_transformer_fn * sparse_table_plugin::mk_rename_fn(const table_base & t, unsigned permutation_cycle_len,
             const unsigned * permutation_cycle) {
-        if(t.get_kind()!=get_kind()) {
+        if (t.get_kind()!=get_kind()) {
             return 0;
         }
         return alloc(rename_fn, t.get_signature(), permutation_cycle_len, permutation_cycle);
@@ -1123,9 +1120,9 @@ namespace datalog {
             unsigned neg_fisrt_func = neg.get_signature().first_functional();
             counter ctr;
             ctr.count(m_cols2);
-            m_joining_neg_non_functional = ctr.get_max_counter_value()==1
-                && ctr.get_positive_count()==neg_fisrt_func
-                && (neg_fisrt_func==0 || ctr.get_max_positive()==neg_fisrt_func-1);
+            m_joining_neg_non_functional = ctr.get_max_counter_value() == 1
+                && ctr.get_positive_count() == neg_fisrt_func
+                && (neg_fisrt_func == 0 || ctr.get_max_positive() == neg_fisrt_func-1);
         }
 
         /**
@@ -1136,7 +1133,7 @@ namespace datalog {
                 bool tgt_is_first, svector<store_offset> & res) {
             SASSERT(res.empty());
 
-            if(!tgt_is_first) {
+            if (!tgt_is_first) {
                 m_intersection_content.reset();
             }
@@ -1153,32 +1150,32 @@ namespace datalog {
             bool key_modified=true;
             key_indexer::query_result t2_offsets;
             store_offset t1_after_last = t1.m_data.after_last_offset();
-            for(store_offset t1_ofs=0; t1_ofs<t1_after_last; t1_ofs+=t1.m_fact_size) {
-                for(unsigned i=0; i<joined_col_cnt; i++) {
+            for (store_offset t1_ofs=0; t1_ofs<t1_after_last; t1_ofs+=t1.m_fact_size) {
+                for (unsigned i=0; i<joined_col_cnt; i++) {
@@ -1192,8 +1189,8 @@ namespace datalog {
             sparse_table & tgt = static_cast<sparse_table &>(tgt0);
             const sparse_table & neg = static_cast<const sparse_table &>(neg0);
 
-            if(m_cols1.size()==0) {
-                if(!neg.empty()) {
+            if (m_cols1.size() == 0) {
+                if (!neg.empty()) {
                     tgt.reset();
                 }
                 return;
@@ -1207,14 +1204,14 @@ namespace datalog {
             //We don't do just the simple tgt.row_count()>neg.row_count() because the swapped case is
             //more expensive. The constant 4 is, however, just my guess what the ratio might be.
-            if(tgt.row_count()/4>neg.row_count()) {
+            if (tgt.row_count()/4>neg.row_count()) {
                 collect_intersection_offsets(neg, tgt, false, to_remove);
             }
             else {
                 collect_intersection_offsets(tgt, neg, true, to_remove);
             }
 
-            if(to_remove.empty()) {
+            if (to_remove.empty()) {
                 return;
             }
 
@@ -1232,7 +1229,7 @@ namespace datalog {
     table_intersection_filter_fn * sparse_table_plugin::mk_filter_by_negation_fn(const table_base & t,
             const table_base & negated_obj, unsigned joined_col_cnt,
             const unsigned * t_cols, const unsigned * negated_cols) {
-        if(!check_kind(t) || !check_kind(negated_obj)
+        if (!check_kind(t) || !check_kind(negated_obj)
                 || join_involves_functional(t.get_signature(), negated_obj.get_signature(), joined_col_cnt,
                     t_cols, negated_cols) ) {
             return 0;
diff --git a/src/muz_qe/rel_context.cpp b/src/muz_qe/rel_context.cpp
index 08ac1fa14..327e63c0f 100644
--- a/src/muz_qe/rel_context.cpp
+++ b/src/muz_qe/rel_context.cpp
@@ -107,15 +107,16 @@ namespace datalog {
 
         scoped_query scoped_query(m_context);
 
-        m_code.reset();
         instruction_block termination_code;
-        m_ectx.reset();
 
         lbool result;
 
         TRACE("dl", m_context.display(tout););
 
         while (true) {
+            m_ectx.reset();
+            m_code.reset();
+            termination_code.reset();
             m_context.ensure_closed();
             m_context.transform_rules();
             if (m_context.canceled()) {
@@ -174,8 +175,6 @@ namespace datalog {
             else {
                 restart_time = static_cast<unsigned>(new_restart_time);
             }
-
-            termination_code.reset();
             scoped_query.reset();
         }
         m_context.record_transformed_rules();
@@ -452,6 +451,11 @@ namespace datalog {
         }
     }
 
+    bool rel_context::has_facts(func_decl * pred) const {
+        relation_base* r = try_get_relation(pred);
+        return r && !r->empty();
+    }
+
     void rel_context::store_relation(func_decl * pred, relation_base * rel) {
         get_rmanager().store_relation(pred, rel);
     }
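==== review note (illustrative sketch; not part of the patch) ====
The rel_context.cpp change above fixes a restart bug: m_code, m_ectx, and termination_code must be cleared at the top of every iteration of the solve loop, not once before it, or a restarted query executes stale instructions. A toy illustration of the difference:

#include <cassert>
#include <vector>

int main() {
    std::vector<int> code;          // stands in for m_code / termination_code
    int computed_total = 0;
    for (int iteration = 0; iteration < 3; iteration++) {
        code.clear();               // the fix: reset per iteration
        code.push_back(iteration);  // "compile" this iteration's program
        computed_total += static_cast<int>(code.size());
    }
    // Without the per-iteration clear(), code would accumulate stale
    // instructions across restarts: sizes 1,2,3 instead of 1,1,1.
    assert(computed_total == 3);
    return 0;
}
==== end review note ====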
diff --git a/src/muz_qe/rel_context.h b/src/muz_qe/rel_context.h
index 3ee9f76da..3e019cb50 100644
--- a/src/muz_qe/rel_context.h
+++ b/src/muz_qe/rel_context.h
@@ -87,9 +87,14 @@ namespace datalog {
         */
         bool result_contains_fact(relation_fact const& f);
 
+        /** \brief add facts to relation
+        */
         void add_fact(func_decl* pred, relation_fact const& fact);
-
         void add_fact(func_decl* pred, table_fact const& fact);
+
+        /** \brief check if facts were added to relation
+        */
+        bool has_facts(func_decl * pred) const;
 
         /**
            \brief Store the relation \c rel under the predicate \c pred. The \c context object
diff --git a/src/tactic/arith/purify_arith_tactic.cpp b/src/tactic/arith/purify_arith_tactic.cpp
index 169e27927..f2ddd86ce 100644
--- a/src/tactic/arith/purify_arith_tactic.cpp
+++ b/src/tactic/arith/purify_arith_tactic.cpp
@@ -29,6 +29,7 @@ Revision History:
 #include"th_rewriter.h"
 #include"filter_model_converter.h"
 #include"ast_smt2_pp.h"
+#include"expr_replacer.h"
 
 /*
   ----
@@ -131,18 +132,16 @@ struct purify_arith_proc {
         proof_ref_vector   m_new_cnstr_prs;
         expr_ref           m_subst;
         proof_ref          m_subst_pr;
-        bool               m_in_q;
-        unsigned           m_var_idx;
+        expr_ref_vector    m_new_vars;
 
-        rw_cfg(purify_arith_proc & o, bool in_q):
+        rw_cfg(purify_arith_proc & o):
             m_owner(o),
             m_pinned(o.m()),
             m_new_cnstrs(o.m()),
             m_new_cnstr_prs(o.m()),
             m_subst(o.m()),
             m_subst_pr(o.m()),
-            m_in_q(in_q),
-            m_var_idx(0) {
+            m_new_vars(o.m()) {
         }
 
         ast_manager & m() { return m_owner.m(); }
@@ -155,14 +154,9 @@ struct purify_arith_proc {
         bool elim_inverses() const { return m_owner.m_elim_inverses; }
 
         expr * mk_fresh_var(bool is_int) {
-            if (m_in_q) {
-                unsigned idx = m_var_idx;
-                m_var_idx++;
-                return m().mk_var(idx, is_int ? u().mk_int() : u().mk_real());
-            }
-            else {
-                return m().mk_fresh_const(0, is_int ? u().mk_int() : u().mk_real());
-            }
+            expr * r = m().mk_fresh_const(0, is_int ? u().mk_int() : u().mk_real());
+            m_new_vars.push_back(r);
+            return r;
         }
 
         expr * mk_fresh_real_var() { return mk_fresh_var(false); }
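==== review note (illustrative sketch; not part of the patch) ====
The new mk_fresh_var always introduces a fresh constant and records it in m_new_vars; the quantifier case later counts and rebinds exactly the constants that were actually created, instead of pre-counting them with num_vars_proc. A minimal sketch of the record-as-you-go strategy (toy generator, not the z3 API):

#include <cassert>
#include <string>
#include <vector>

struct fresh_gen {
    unsigned counter = 0;
    std::vector<std::string> new_vars; // plays the role of m_new_vars

    std::string mk_fresh_const() {
        std::string r = "k!" + std::to_string(counter++);
        new_vars.push_back(r); // record every auxiliary constant we introduce
        return r;
    }
};

int main() {
    fresh_gen g;
    g.mk_fresh_const();
    g.mk_fresh_const();
    // The rewriter no longer needs to know up front how many auxiliaries a
    // quantifier body will need; it simply counts them afterwards.
    assert(g.new_vars.size() == 2);
    return 0;
}
==== end review note ====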
@@ -596,105 +590,51 @@ struct purify_arith_proc {
 
     struct rw : public rewriter_tpl<rw_cfg> {
         rw_cfg m_cfg;
-        rw(purify_arith_proc & o, bool in_q):
+        rw(purify_arith_proc & o):
             rewriter_tpl<rw_cfg>(o.m(), o.m_produce_proofs, m_cfg),
-            m_cfg(o, in_q) {
-        }
-    };
-
-    /**
-       \brief Return the number of (auxiliary) variables needed for converting an expression.
-    */
-    struct num_vars_proc {
-        arith_util &     m_util;
-        expr_fast_mark1  m_visited;
-        ptr_vector<expr> m_todo;
-        unsigned         m_num_vars;
-        bool             m_elim_root_objs;
-
-        num_vars_proc(arith_util & u, bool elim_root_objs):
-            m_util(u),
-            m_elim_root_objs(elim_root_objs) {
-        }
-
-        void visit(expr * t) {
-            if (m_visited.is_marked(t))
-                return;
-            m_visited.mark(t);
-            m_todo.push_back(t);
-        }
-
-        void process(app * t) {
-            if (t->get_family_id() == m_util.get_family_id()) {
-                if (m_util.is_power(t)) {
-                    rational k;
-                    if (m_util.is_numeral(t->get_arg(1), k) && (k.is_zero() || !k.is_int())) {
-                        m_num_vars++;
-                    }
-                }
-                else if (m_util.is_div(t) ||
-                         m_util.is_idiv(t) ||
-                         m_util.is_mod(t) ||
-                         m_util.is_to_int(t) ||
-                         (m_util.is_irrational_algebraic_numeral(t) && m_elim_root_objs)) {
-                    m_num_vars++;
-                }
-            }
-            unsigned num_args = t->get_num_args();
-            for (unsigned i = 0; i < num_args; i++)
-                visit(t->get_arg(i));
-        }
-
-        unsigned operator()(expr * t) {
-            m_num_vars = 0;
-            visit(t);
-            while (!m_todo.empty()) {
-                expr * t = m_todo.back();
-                m_todo.pop_back();
-                if (is_app(t))
-                    process(to_app(t));
-            }
-            m_visited.reset();
-            return m_num_vars;
+            m_cfg(o) {
         }
     };
 
     void process_quantifier(quantifier * q, expr_ref & result, proof_ref & result_pr) {
         result_pr = 0;
-        num_vars_proc p(u(), m_elim_root_objs);
-        expr_ref body(m());
-        unsigned num_vars = p(q->get_expr());
-        if (num_vars > 0) {
-            // open space for aux vars
-            var_shifter shifter(m());
-            shifter(q->get_expr(), num_vars, body);
-        }
-        else {
-            body = q->get_expr();
-        }
-
-        rw r(*this, true);
+        rw r(*this);
         expr_ref new_body(m());
         proof_ref new_body_pr(m());
-        r(body, new_body, new_body_pr);
+        r(q->get_expr(), new_body, new_body_pr);
+        unsigned num_vars = r.cfg().m_new_vars.size();
         TRACE("purify_arith",
               tout << "num_vars: " << num_vars << "\n";
-              tout << "body: " << mk_ismt2_pp(body, m()) << "\nnew_body: " << mk_ismt2_pp(new_body, m()) << "\n";);
+              tout << "body: " << mk_ismt2_pp(q->get_expr(), m()) << "\nnew_body: " << mk_ismt2_pp(new_body, m()) << "\n";);
         if (num_vars == 0) {
+            SASSERT(r.cfg().m_new_cnstrs.empty());
             result = m().update_quantifier(q, new_body);
             if (m_produce_proofs)
                 result_pr = m().mk_quant_intro(q, to_quantifier(result.get()), result_pr);
         }
         else {
+            // Add new constraints
             expr_ref_vector & cnstrs = r.cfg().m_new_cnstrs;
             cnstrs.push_back(new_body);
             new_body = m().mk_and(cnstrs.size(), cnstrs.c_ptr());
+            // Open space for new variables
+            var_shifter shifter(m());
+            shifter(new_body, num_vars, new_body);
+            // Rename fresh constants in r.cfg().m_new_vars to variables
             ptr_buffer<sort> sorts;
             buffer<symbol>   names;
+            expr_substitution subst(m(), false, false);
             for (unsigned i = 0; i < num_vars; i++) {
-                sorts.push_back(u().mk_real());
+                expr * c = r.cfg().m_new_vars.get(i);
+                sort * s = get_sort(c);
+                sorts.push_back(s);
                 names.push_back(m().mk_fresh_var_name("x"));
+                unsigned idx = num_vars - i - 1;
+                subst.insert(c, m().mk_var(idx, s));
             }
+            scoped_ptr<expr_replacer> replacer = mk_default_expr_replacer(m());
+            replacer->set_substitution(&subst);
+            (*replacer)(new_body, new_body);
             new_body = m().mk_exists(num_vars, sorts.c_ptr(), names.c_ptr(), new_body);
             result = m().update_quantifier(q, new_body);
             if (m_produce_proofs) {
@@ -708,7 +648,7 @@ struct purify_arith_proc {
     }
 
     void operator()(goal & g, model_converter_ref & mc, bool produce_models) {
-        rw r(*this, false);
+        rw r(*this);
         // purify
         expr_ref new_curr(m());
         proof_ref new_pr(m());
diff --git a/src/test/diff_logic.cpp b/src/test/diff_logic.cpp
index f577ebfd6..14ec66cc8 100644
--- a/src/test/diff_logic.cpp
+++ b/src/test/diff_logic.cpp
@@ -166,7 +166,7 @@ static void tst3() {
 void tst_diff_logic() {
     tst1();
     tst2();
-    tst3();
+    // tst3();
 }
 #else
 void tst_diff_logic() {
diff --git a/src/test/dl_query.cpp b/src/test/dl_query.cpp
index 3fab95583..44b072403 100644
--- a/src/test/dl_query.cpp
+++ b/src/test/dl_query.cpp
@@ -48,6 +48,7 @@ void dl_query_test(ast_manager & m, smt_params & fparams, params_ref& params,
                    bool use_magic_sets) {
     dl_decl_util decl_util(m);
+    random_gen ran(0);
 
     context ctx_q(m, fparams);
     params.set_bool("magic_sets_for_queries", use_magic_sets);
@@ -86,7 +87,7 @@ void dl_query_test(ast_manager & m, smt_params & fparams, params_ref& params,
             warning_msg("cannot get sort size");
             return;
         }
-        uint64 num = rand()%sort_sz;
+        uint64 num = ran()%sort_sz;
         app * el_b = decl_util.mk_numeral(num, sig_b[col]);
         f_b.push_back(el_b);
         app * el_q = decl_util.mk_numeral(num, sig_q[col]);
@@ -112,7 +113,7 @@ void dl_query_test(ast_manager & m, smt_params & fparams, params_ref& params,
     table_base::iterator fit = table_b.begin();
     table_base::iterator fend = table_b.end();
     for(; fit!=fend; ++fit) {
-        if(rand()%std::max(1u,table_sz/test_count)!=0) {
+        if(ran()%std::max(1u,table_sz/test_count)!=0) {
             continue;
         }
         fit->get_fact(tf);
@@ -131,6 +132,7 @@ void dl_query_test_wpa(smt_params & fparams, params_ref& params) {
     arith_util arith(m);
     const char * problem_dir = "C:\\tvm\\src\\z3_2\\debug\\test\\w0.datalog";
     dl_decl_util dl_util(m);
+    random_gen ran(0);
 
     std::cerr << "Testing queries on " << problem_dir <<"\n";
     context ctx(m, fparams);
@@ -151,8 +153,8 @@ void dl_query_test_wpa(smt_params & fparams, params_ref& params) {
         TRUSTME( ctx.try_get_sort_constant_count(var_sort, var_sz) );
 
         for(unsigned attempt=0; attempt<attempts; attempt++) {
-            unsigned el1 = rand()%var_sz;
-            unsigned el2 = rand()%var_sz;
+            unsigned el1 = ran()%var_sz;
+            unsigned el2 = ran()%var_sz;
diff --git a/src/util/mpz.cpp b/src/util/mpz.cpp
--- a/src/util/mpz.cpp
+++ b/src/util/mpz.cpp
@@ -118,6 +118,7 @@ template<bool SYNCH> mpz_manager<SYNCH>::mpz_manager():
     mpz_set_ui(m_tmp, max_l);
     mpz_add(m_uint64_max, m_uint64_max, m_tmp);
 
     mpz_init(m_int64_max);
+    mpz_init(m_int64_min);
 
     max_l = static_cast<unsigned>(INT64_MAX % static_cast<int64>(UINT_MAX));
     max_h = static_cast<unsigned>(INT64_MAX / static_cast<int64>(UINT_MAX));
@@ -128,6 +129,8 @@ template<bool SYNCH> mpz_manager<SYNCH>::mpz_manager():
     mpz_mul(m_int64_max, m_tmp, m_int64_max);
     mpz_set_ui(m_tmp, max_l);
     mpz_add(m_int64_max, m_tmp, m_int64_max);
+    mpz_neg(m_int64_min, m_int64_max);
+    mpz_sub_ui(m_int64_min, m_int64_min, 1);
 #endif
 
     mpz one(1);
@@ -152,6 +155,7 @@ template<bool SYNCH> mpz_manager<SYNCH>::~mpz_manager() {
     deallocate(m_arg[1]);
     mpz_clear(m_uint64_max);
     mpz_clear(m_int64_max);
+    mpz_clear(m_int64_min);
 #endif
     if (SYNCH)
         omp_destroy_nest_lock(&m_lock);
@@ -1299,10 +1303,9 @@ bool mpz_manager<SYNCH>::is_int64(mpz const & a) const {
     if (is_small(a))
         return true;
 #ifndef _MP_GMP
-    if (!is_uint64(a)) {
+    if (!is_abs_uint64(a))
         return false;
-    }
-    uint64 num = get_uint64(a);
+    uint64 num = big_abs_to_uint64(a);
     uint64 msb = static_cast<uint64>(1) << 63;
     uint64 msb_val = msb & num;
     if (a.m_val >= 0) {
@@ -1318,7 +1321,7 @@ bool mpz_manager<SYNCH>::is_int64(mpz const & a) const {
     }
 #else
     // GMP version
-    return mpz_cmp(*a.m_ptr, m_int64_max) <= 0;
+    return mpz_cmp(m_int64_min, *a.m_ptr) <= 0 && mpz_cmp(*a.m_ptr, m_int64_max) <= 0;
 #endif
 }
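==== review note (illustrative sketch; not part of the patch) ====
The GMP branch of is_int64 above previously checked only the upper bound, so any value below INT64_MIN was accepted; the patch introduces m_int64_min (computed as -INT64_MAX - 1) and tests both ends. A tiny stand-in demonstration of why the two-sided check matters, using long double in place of arbitrary-precision values:

#include <cassert>

static bool fits_int64(long double v) { // wide stand-in for an mpz value
    const long double int64_max = 9223372036854775807.0L;
    const long double int64_min = -int64_max - 1.0L;
    return int64_min <= v && v <= int64_max; // both bounds, as in the fix
}

int main() {
    assert(fits_int64(-42.0L));
    assert(!fits_int64(-1.0e30L)); // the one-sided check used to accept this
    return 0;
}
==== end review note ====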
@@ -1328,14 +1331,7 @@ uint64 mpz_manager<SYNCH>::get_uint64(mpz const & a) const {
         return static_cast<uint64>(a.m_val);
 #ifndef _MP_GMP
     SASSERT(a.m_ptr->m_size > 0);
-    if (a.m_ptr->m_size == 1)
-        return digits(a)[0];
-    if (sizeof(digit_t) == sizeof(uint64))
-        // 64-bit machine
-        return digits(a)[0];
-    else
-        // 32-bit machine
-        return ((static_cast<uint64>(digits(a)[1]) << 32) | (static_cast<uint64>(digits(a)[0])));
+    return big_abs_to_uint64(a);
 #else
     // GMP version
     if (sizeof(uint64) == sizeof(unsigned long)) {
@@ -1360,7 +1356,7 @@ int64 mpz_manager<SYNCH>::get_int64(mpz const & a) const {
         return static_cast<int64>(a.m_val);
 #ifndef _MP_GMP
     SASSERT(is_int64(a));
-    uint64 num = get_uint64(a);
+    uint64 num = big_abs_to_uint64(a);
     if (a.m_val < 0) {
         if (num != 0 && (num << 1) == 0)
             return INT64_MIN;
diff --git a/src/util/mpz.h b/src/util/mpz.h
index b5c301d82..923d5b3a7 100644
--- a/src/util/mpz.h
+++ b/src/util/mpz.h
@@ -168,6 +168,7 @@ class mpz_manager {
     mpz_t *   m_arg[2];
     mpz_t     m_uint64_max;
     mpz_t     m_int64_max;
+    mpz_t     m_int64_min;
 
     mpz_t * allocate() {
         mpz_t * cell = reinterpret_cast<mpz_t*>(m_allocator.allocate(sizeof(mpz_t)));
@@ -211,6 +212,30 @@ class mpz_manager {
 
     static digit_t * digits(mpz const & c) { return c.m_ptr->m_digits; }
 
+    // Return true if the absolute value fits in a UINT64
+    static bool is_abs_uint64(mpz const & a) {
+        if (is_small(a))
+            return true;
+        if (sizeof(digit_t) == sizeof(uint64))
+            return size(a) <= 1;
+        else
+            return size(a) <= 2;
+    }
+
+    // CAST the absolute value into a UINT64
+    static uint64 big_abs_to_uint64(mpz const & a) {
+        SASSERT(is_abs_uint64(a));
+        SASSERT(!is_small(a));
+        if (a.m_ptr->m_size == 1)
+            return digits(a)[0];
+        if (sizeof(digit_t) == sizeof(uint64))
+            // 64-bit machine
+            return digits(a)[0];
+        else
+            // 32-bit machine
+            return ((static_cast<uint64>(digits(a)[1]) << 32) | (static_cast<uint64>(digits(a)[0])));
+    }
+
     template<int IDX>
     void get_sign_cell(mpz const & a, int & sign, mpz_cell * & cell) {
         if (is_small(a)) {
diff --git a/src/util/util.h b/src/util/util.h
index 8bcbce4a3..0aa8f881d 100644
--- a/src/util/util.h
+++ b/src/util/util.h
@@ -45,7 +45,7 @@ COMPILE_TIME_ASSERT(sizeof(int64) == 8);
 #define INT64_MIN static_cast<int64>(0x8000000000000000ull)
 #endif
 #ifndef INT64_MAX
-#define INT64_MAX static_cast<int64>(0x0fffffffffffffffull)
+#define INT64_MAX static_cast<int64>(0x7fffffffffffffffull)
 #endif
 #ifndef UINT64_MAX
 #define UINT64_MAX 0xffffffffffffffffull
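==== review note (illustrative sketch; not part of the patch) ====
The util.h hunk above corrects a genuine constant bug: 0x0fffffffffffffffull is 2^60 - 1, not the largest signed 64-bit value, so the fallback INT64_MAX was far too small. A compile-time check of both facts against the standard <cstdint> definitions:

#include <cstdint>

static_assert(0x0fffffffffffffffull == (uint64_t(1) << 60) - 1,
              "the old constant is only 2^60 - 1");
static_assert(uint64_t(INT64_MAX) == 0x7fffffffffffffffull,
              "the corrected constant matches the real INT64_MAX");

int main() { return 0; }
==== end review note ====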