mirror of https://github.com/Z3Prover/z3

overhaul rule_set

Signed-off-by: Nikolaj Bjorner <nbjorner@microsoft.com>
commit 9456f16a4c
Nikolaj Bjorner, 2013-04-09 10:15:20 -07:00
15 changed files with 233 additions and 253 deletions

View file

@ -707,6 +707,7 @@ namespace datalog {
//update the head relation
make_union(new_head_reg, head_reg, delta_reg, use_widening, acc);
make_dealloc_non_void(new_head_reg, acc);
}
finish:

View file

@ -756,6 +756,10 @@ namespace datalog {
add_fact(head->get_decl(), fact);
}
bool context::has_facts(func_decl * pred) const {
return m_rel && m_rel->has_facts(pred);
}
void context::add_table_fact(func_decl * pred, const table_fact & fact) {
if (get_engine() == DATALOG_ENGINE) {
ensure_rel();
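
The new context::has_facts mirrors the rel_context::has_facts implementation further down: it reports whether any fact has been registered for a predicate, without forcing other work to happen first. A standalone sketch of the idiom, not part of this commit (toy_context and the string/int fact model are illustrative stand-ins for z3's classes):

#include <iostream>
#include <map>
#include <set>
#include <string>

// Toy model of the has_facts idiom added in this commit: answer "were any
// facts registered for this predicate?" without materializing the relation.
struct toy_context {
    std::map<std::string, std::set<int> > m_facts;   // predicate -> facts
    void add_fact(const std::string& pred, int fact) { m_facts[pred].insert(fact); }
    bool has_facts(const std::string& pred) const {
        auto it = m_facts.find(pred);
        return it != m_facts.end() && !it->second.empty();
    }
};

int main() {
    toy_context ctx;
    std::cout << ctx.has_facts("edge") << "\n"; // 0: nothing registered yet
    ctx.add_fact("edge", 42);
    std::cout << ctx.has_facts("edge") << "\n"; // 1
}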

View file

@ -257,6 +257,7 @@ namespace datalog {
void add_fact(app * head);
void add_fact(func_decl * pred, const relation_fact & fact);
bool has_facts(func_decl * pred) const;
void add_rule(rule_ref& r);

View file

@ -309,6 +309,8 @@ namespace datalog {
if (!m_context.magic_sets_for_queries()) {
return 0;
}
SASSERT(source.contains(m_goal));
SASSERT(source.get_predicate_rules(m_goal).size() == 1);
app * goal_head = source.get_predicate_rules(m_goal)[0]->get_head();

View file

@ -42,20 +42,20 @@ namespace datalog {
/**
Allows to traverse head and positive tails in a single for loop starting from -1
*/
app * get_by_tail_index(rule * r, int idx) {
if (idx == -1) {
static app * get_by_tail_index(rule * r, int idx) {
if (idx < 0) {
return r->get_head();
}
SASSERT(idx<static_cast<int>(r->get_positive_tail_size()));
SASSERT(idx < static_cast<int>(r->get_positive_tail_size()));
return r->get_tail(idx);
}
template<typename T>
int aux_compare(T a, T b) {
static int aux_compare(T a, T b) {
return (a>b) ? 1 : ( (a==b) ? 0 : -1);
}
int compare_var_args(app* t1, app* t2) {
static int compare_var_args(app* t1, app* t2) {
SASSERT(t1->get_num_args()==t2->get_num_args());
int res;
unsigned n = t1->get_num_args();
@ -76,7 +76,7 @@ namespace datalog {
return 0;
}
int compare_args(app* t1, app* t2, int & skip_countdown) {
static int compare_args(app* t1, app* t2, int & skip_countdown) {
SASSERT(t1->get_num_args()==t2->get_num_args());
int res;
unsigned n = t1->get_num_args();
@ -101,7 +101,7 @@ namespace datalog {
Two rules are in the same rough similarity class if they differ only in constant arguments
of positive uninterpreted predicates.
*/
int rough_compare(rule * r1, rule * r2) {
static int rough_compare(rule * r1, rule * r2) {
int res = aux_compare(r1->get_tail_size(), r2->get_tail_size());
if (res!=0) { return res; }
res = aux_compare(r1->get_uninterpreted_tail_size(), r2->get_uninterpreted_tail_size());
@ -132,7 +132,7 @@ namespace datalog {
\c r1 and \c r2 must be equal according to the \c rough_compare function for this function
to be called.
*/
int total_compare(rule * r1, rule * r2, int skipped_arg_index = INT_MAX) {
static int total_compare(rule * r1, rule * r2, int skipped_arg_index = INT_MAX) {
SASSERT(rough_compare(r1, r2)==0);
int pos_tail_sz = r1->get_positive_tail_size();
for (int i=-1; i<pos_tail_sz; i++) {
@ -168,7 +168,7 @@ namespace datalog {
typedef svector<const_info> info_vector;
void collect_const_indexes(app * t, int tail_index, info_vector & res) {
static void collect_const_indexes(app * t, int tail_index, info_vector & res) {
unsigned n = t->get_num_args();
for (unsigned i=0; i<n; i++) {
if (is_var(t->get_arg(i))) {
@ -178,7 +178,7 @@ namespace datalog {
}
}
void collect_const_indexes(rule * r, info_vector & res) {
static void collect_const_indexes(rule * r, info_vector & res) {
collect_const_indexes(r->get_head(), -1, res);
unsigned pos_tail_sz = r->get_positive_tail_size();
for (unsigned i=0; i<pos_tail_sz; i++) {
@ -187,7 +187,7 @@ namespace datalog {
}
template<class T>
void collect_orphan_consts(rule * r, const info_vector & const_infos, T & tgt) {
static void collect_orphan_consts(rule * r, const info_vector & const_infos, T & tgt) {
unsigned const_cnt = const_infos.size();
tgt.reset();
for (unsigned i=0; i<const_cnt; i++) {
@ -201,7 +201,7 @@ namespace datalog {
}
}
template<class T>
void collect_orphan_sorts(rule * r, const info_vector & const_infos, T & tgt) {
static void collect_orphan_sorts(rule * r, const info_vector & const_infos, T & tgt) {
unsigned const_cnt = const_infos.size();
tgt.reset();
for (unsigned i=0; i<const_cnt; i++) {
@ -218,7 +218,7 @@ namespace datalog {
\brief From the \c tail_indexes and \c arg_indexes remove elements corresponding to constants
that are the same in rules \c *first ... \c *(after_last-1).
*/
void remove_stable_constants(rule_vector::iterator first, rule_vector::iterator after_last,
static void remove_stable_constants(rule_vector::iterator first, rule_vector::iterator after_last,
info_vector & const_infos) {
SASSERT(after_last-first>1);
unsigned const_cnt = const_infos.size();
@ -255,7 +255,7 @@ namespace datalog {
first constant that is equal to it in all the rules. If there is no such, it will contain
its own index.
*/
void detect_equal_constants(rule_vector::iterator first, rule_vector::iterator after_last,
static void detect_equal_constants(rule_vector::iterator first, rule_vector::iterator after_last,
info_vector & const_infos) {
SASSERT(first!=after_last);
unsigned const_cnt = const_infos.size();
@ -305,7 +305,7 @@ namespace datalog {
}
}
unsigned get_constant_count(rule * r) {
static unsigned get_constant_count(rule * r) {
unsigned res = r->get_head()->get_num_args() - count_variable_arguments(r->get_head());
unsigned pos_tail_sz = r->get_positive_tail_size();
for (unsigned i=0; i<pos_tail_sz; i++) {
@ -314,7 +314,7 @@ namespace datalog {
return res;
}
bool initial_comparator(rule * r1, rule * r2) {
static bool initial_comparator(rule * r1, rule * r2) {
int res = rough_compare(r1, r2);
if (res!=0) { return res>0; }
return total_compare(r1, r2)>0;

View file

@ -33,7 +33,7 @@ namespace datalog {
entry_storage::store_offset entry_storage::insert_or_get_reserve_content() {
SASSERT(has_reserve());
store_offset entry_ofs = m_data_indexer.insert_if_not_there(m_reserve);
if(m_reserve==entry_ofs) {
if (m_reserve == entry_ofs) {
//entry inserted, so reserve is no longer a reserve
m_reserve = NO_RESERVE;
}
@ -42,7 +42,7 @@ namespace datalog {
bool entry_storage::insert_reserve_content() {
SASSERT(has_reserve());
store_offset entry_ofs = m_data_indexer.insert_if_not_there(m_reserve);
if(m_reserve==entry_ofs) {
if (m_reserve == entry_ofs) {
//entry inserted, so reserve is no longer a reserve
m_reserve = NO_RESERVE;
return true;
@ -53,7 +53,7 @@ namespace datalog {
bool entry_storage::remove_reserve_content() {
SASSERT(has_reserve());
store_offset entry_ofs;
if(!find_reserve_content(entry_ofs)) {
if (!find_reserve_content(entry_ofs)) {
//the fact was not in the table
return false;
}
@ -64,8 +64,8 @@ namespace datalog {
void entry_storage::remove_offset(store_offset ofs) {
m_data_indexer.remove(ofs);
store_offset last_ofs = after_last_offset() - m_entry_size;
if(ofs!=last_ofs) {
SASSERT(ofs+m_entry_size<=last_ofs);
if (ofs!=last_ofs) {
SASSERT(ofs + m_entry_size <= last_ofs);
//we don't want any holes, so we put the last element at the place
//of the removed one
m_data_indexer.remove(last_ofs);
@ -73,7 +73,7 @@ namespace datalog {
memcpy(base+ofs, base+last_ofs, m_entry_size);
m_data_indexer.insert(ofs);
}
if(has_reserve()) {
if (has_reserve()) {
//we already have a reserve, so we need to shrink a little to keep having just one
resize_data(m_data_size-m_entry_size);
}
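
remove_offset above keeps the entry store hole-free: the last fixed-size entry is copied over the vacated slot (after updating the hash index) and the store shrinks by exactly one entry. A simplified sketch of the swap-remove idiom, with the m_data_indexer bookkeeping omitted:

#include <cstdio>
#include <cstring>
#include <vector>

struct flat_store {
    std::vector<char> data;
    std::size_t entry_size;
    void remove_at(std::size_t ofs) {
        std::size_t last = data.size() - entry_size;  // offset of last entry
        if (ofs != last)
            std::memcpy(&data[ofs], &data[last], entry_size); // fill the hole
        data.resize(last);                            // shrink by one entry
    }
};

int main() {
    flat_store s = {{'a','b','c','d','e','f'}, 2};    // entries: ab cd ef
    s.remove_at(0);                                   // ef moves into slot 0
    std::printf("%.*s\n", (int)s.data.size(), &s.data[0]); // prints efcd
}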
@ -98,20 +98,20 @@ namespace datalog {
unsigned length = 0;
unsigned dom_size_sm;
if(dom_size>UINT_MAX) {
if (dom_size>UINT_MAX) {
dom_size_sm = static_cast<unsigned>(dom_size>>32);
length += 32;
if( (dom_size&UINT_MAX)!=0 && dom_size_sm!=UINT_MAX ) {
if ( (dom_size&UINT_MAX)!=0 && dom_size_sm!=UINT_MAX ) {
dom_size_sm++;
}
}
else {
dom_size_sm=static_cast<unsigned>(dom_size);
}
if(dom_size_sm==1) {
if (dom_size_sm == 1) {
length += 1; //unary domains
}
else if(dom_size_sm>0x80000000u) {
else if (dom_size_sm > 0x80000000u) {
length += 32;
}
else {
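
get_domain_length above sizes each column to the number of bits needed for values in 0 .. dom_size-1, splitting 64-bit domains into a 32-bit high part and giving unary domains a one-bit floor. A sketch of the underlying bit-width computation (a simplification for illustration, not the exact function):

#include <cassert>
#include <cstdint>
#include <iostream>

static unsigned bits_for_domain(uint64_t dom_size) {
    assert(dom_size > 0);
    unsigned bits = 0;
    for (uint64_t v = dom_size - 1; v != 0; v >>= 1)
        ++bits;                        // position of the highest set bit
    return bits == 0 ? 1 : bits;       // unary domains still occupy one bit
}

int main() {
    std::cout << bits_for_domain(1) << "\n";    // 1
    std::cout << bits_for_domain(256) << "\n";  // 8
    std::cout << bits_for_domain(257) << "\n";  // 9
}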
@ -122,30 +122,30 @@ namespace datalog {
sparse_table::column_layout::column_layout(const table_signature & sig)
: m_functional_col_cnt(sig.functional_columns()) {
SASSERT(sig.size()>0);
SASSERT(sig.size() > 0);
unsigned ofs = 0;
unsigned sig_sz = sig.size();
unsigned first_functional = sig_sz-m_functional_col_cnt;
for(unsigned i=0; i<sig_sz; i++) {
for (unsigned i=0; i<sig_sz; i++) {
uint64 dom_size = sig[i];
unsigned length = get_domain_length(dom_size);
SASSERT(length>0);
SASSERT(length<=64);
if(size()>0 && (length>54 || i==first_functional)) {
if (size() > 0 && (length > 54 || i == first_functional)) {
//large domains must start byte-aligned, as well as functional columns
make_byte_aligned_end(size()-1);
ofs = back().next_ofs();
}
push_back(column_info(ofs, length));
ofs+=length;
ofs += length;
}
make_byte_aligned_end(size()-1);
SASSERT(back().next_ofs()%8==0);//the entries must be aligned to whole bytes
SASSERT(back().next_ofs()%8 == 0);//the entries must be aligned to whole bytes
m_entry_size = back().next_ofs()/8;
if(m_functional_col_cnt) {
SASSERT((*this)[first_functional].m_offset%8==0);
if (m_functional_col_cnt) {
SASSERT((*this)[first_functional].m_offset%8 == 0);
m_functional_part_size = m_entry_size - (*this)[first_functional].m_offset/8;
}
else {
@ -156,9 +156,9 @@ namespace datalog {
void sparse_table::column_layout::make_byte_aligned_end(unsigned col_index0) {
unsigned ofs = (*this)[col_index0].next_ofs();
unsigned ofs_bit_part = ofs%8;
unsigned rounded_ofs = (ofs_bit_part==0) ? ofs : (ofs+8-ofs_bit_part);
unsigned rounded_ofs = (ofs_bit_part == 0) ? ofs : (ofs+8-ofs_bit_part);
if(rounded_ofs!=ofs) {
if (rounded_ofs!=ofs) {
SASSERT(rounded_ofs>ofs);
int diff = rounded_ofs-ofs;
unsigned col_idx = col_index0+1;
@ -168,18 +168,18 @@ namespace datalog {
col_idx--;
column_info & ci = (*this)[col_idx];
unsigned new_length = ci.m_length;
if(ci.m_length<64) {
if (ci.m_length < 64) {
unsigned swallowed = std::min(64-static_cast<int>(ci.m_length), diff);
diff-=swallowed;
new_length+=swallowed;
diff -= swallowed;
new_length += swallowed;
}
unsigned new_ofs = ci.m_offset+diff;
ci = column_info(new_ofs, new_length);
}
}
SASSERT(rounded_ofs%8==0);
SASSERT((*this)[col_index0].next_ofs()%8==0);
SASSERT(rounded_ofs%8 == 0);
SASSERT((*this)[col_index0].next_ofs()%8 == 0);
}
// -----------------------------------
@ -218,7 +218,7 @@ namespace datalog {
m_layout(t.m_column_layout) {}
virtual bool is_finished() const {
return m_ptr==m_end;
return m_ptr == m_end;
}
virtual row_interface & operator*() {
@ -267,7 +267,7 @@ namespace datalog {
offset_iterator begin() const { return m_singleton ? &m_single_result : m_many.begin; }
offset_iterator end() const { return m_singleton ? (&m_single_result+1) : m_many.end; }
bool empty() const { return begin()==end(); }
bool empty() const { return begin() == end(); }
};
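
key_indexer::query_result above stores either a single offset inline or a begin/end pointer range, and begin()/end() dispatch on m_singleton so callers never see the difference. A standalone sketch of that one-or-many small-result optimization (offsets_result is an illustrative stand-in):

#include <cstddef>
#include <iostream>

struct offsets_result {
    bool m_singleton;
    std::size_t m_single;                 // payload when m_singleton
    const std::size_t* m_begin;           // pointer range otherwise
    const std::size_t* m_end;

    const std::size_t* begin() const { return m_singleton ? &m_single : m_begin; }
    const std::size_t* end()   const { return m_singleton ? &m_single + 1 : m_end; }
    bool empty() const { return begin() == end(); }
};

int main() {
    offsets_result one = {true, 42, 0, 0};
    for (const std::size_t* it = one.begin(); it != one.end(); ++it)
        std::cout << *it << "\n";         // prints 42 once, no heap allocation
}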
key_indexer(unsigned key_len, const unsigned * key_cols)
@ -299,7 +299,7 @@ namespace datalog {
key_to_reserve(key);
store_offset ofs = m_keys.insert_or_get_reserve_content();
index_map::entry * e = m_map.find_core(ofs);
if(!e) {
if (!e) {
TRACE("dl_table_relation", tout << "inserting\n";);
e = m_map.insert_if_not_there2(ofs, offset_vector());
}
@ -312,7 +312,7 @@ namespace datalog {
m_first_nonindexed(0) {}
virtual void update(const sparse_table & t) {
if(m_first_nonindexed==t.m_data.after_last_offset()) {
if (m_first_nonindexed == t.m_data.after_last_offset()) {
return;
}
SASSERT(m_first_nonindexed<t.m_data.after_last_offset());
@ -330,16 +330,16 @@ namespace datalog {
DEBUG_CODE( index_entry = 0; );
bool key_modified = true;
for(; ofs!=after_last; ofs+=t.m_fact_size) {
for(unsigned i=0; i<key_len; i++) {
for (; ofs!=after_last; ofs+=t.m_fact_size) {
for (unsigned i=0; i<key_len; i++) {
table_element col_val = t.get_cell(ofs, m_key_cols[i]);
if(key[i]!=col_val) {
if (key[i]!=col_val) {
key[i] = col_val;
key_modified = true;
}
}
if(key_modified) {
if (key_modified) {
index_entry = &get_matching_offset_vector(key);
key_modified = false;
}
@ -354,11 +354,11 @@ namespace datalog {
virtual query_result get_matching_offsets(const key_value & key) const {
key_to_reserve(key);
store_offset ofs;
if(!m_keys.find_reserve_content(ofs)) {
if (!m_keys.find_reserve_content(ofs)) {
return query_result();
}
index_map::entry * e = m_map.find_core(ofs);
if(!e) {
if (!e) {
return query_result();
}
const offset_vector & res = e->get_data().m_value;
@ -381,15 +381,15 @@ namespace datalog {
static bool can_handle(unsigned key_len, const unsigned * key_cols, const sparse_table & t) {
unsigned non_func_cols = t.get_signature().first_functional();
if(key_len!=non_func_cols) {
if (key_len!=non_func_cols) {
return false;
}
counter ctr;
ctr.count(key_len, key_cols);
if(ctr.get_max_counter_value()!=1 || ctr.get_max_positive()!=non_func_cols-1) {
if (ctr.get_max_counter_value()!=1 || ctr.get_max_positive()!=non_func_cols-1) {
return false;
}
SASSERT(ctr.get_positive_count()==non_func_cols);
SASSERT(ctr.get_positive_count() == non_func_cols);
return true;
}
@ -399,7 +399,7 @@ namespace datalog {
SASSERT(can_handle(key_len, key_cols, t));
m_permutation.resize(key_len);
for(unsigned i=0; i<key_len; i++) {
for (unsigned i=0; i<key_len; i++) {
//m_permutation[m_key_cols[i]] = i;
m_permutation[i] = m_key_cols[i];
}
@ -410,7 +410,7 @@ namespace datalog {
virtual query_result get_matching_offsets(const key_value & key) const {
unsigned key_len = m_key_cols.size();
for(unsigned i=0; i<key_len; i++) {
for (unsigned i=0; i<key_len; i++) {
m_key_fact[m_permutation[i]] = key[i];
}
//We will change the content of the reserve; which does not change the 'high-level'
@ -419,7 +419,7 @@ namespace datalog {
t.write_into_reserve(m_key_fact.c_ptr());
store_offset res;
if(!t.m_data.find_reserve_content(res)) {
if (!t.m_data.find_reserve_content(res)) {
return query_result();
}
return query_result(res);
@ -466,14 +466,14 @@ namespace datalog {
//without having to worry about updating indexes.
//Maybe we might keep a list of indexes that contain functional columns and on an update reset
//only those.
SASSERT(key_len==0 ||
SASSERT(key_len == 0 ||
counter().count(key_len, key_cols).get_max_positive()<get_signature().first_functional());
#endif
key_spec kspec;
kspec.append(key_len, key_cols);
key_index_map::entry * key_map_entry = m_key_indexes.insert_if_not_there2(kspec, 0);
if(!key_map_entry->get_data().m_value) {
if(full_signature_key_indexer::can_handle(key_len, key_cols, *this)) {
if (!key_map_entry->get_data().m_value) {
if (full_signature_key_indexer::can_handle(key_len, key_cols, *this)) {
key_map_entry->get_data().m_value = alloc(full_signature_key_indexer, key_len, key_cols, *this);
}
else {
@ -488,7 +488,7 @@ namespace datalog {
void sparse_table::reset_indexes() {
key_index_map::iterator kmit = m_key_indexes.begin();
key_index_map::iterator kmend = m_key_indexes.end();
for(; kmit!=kmend; ++kmit) {
for (; kmit!=kmend; ++kmit) {
dealloc((*kmit).m_value);
}
m_key_indexes.reset();
@ -499,11 +499,8 @@ namespace datalog {
m_data.ensure_reserve();
char * reserve = m_data.get_reserve_ptr();
unsigned col_cnt = m_column_layout.size();
for(unsigned i=0; i<col_cnt; i++) {
if (f[i] >= get_signature()[i]) {
std::cout << f[i] << " " << get_signature()[i] << "\n";
}
SASSERT(f[i]<get_signature()[i]); //the value fits into the table signature
for (unsigned i = 0; i < col_cnt; ++i) {
SASSERT(f[i] < get_signature()[i]); //the value fits into the table signature
m_column_layout.set(reserve, i, f[i]);
}
}
@ -526,17 +523,17 @@ namespace datalog {
sparse_table & t = const_cast<sparse_table &>(*this);
t.write_into_reserve(f.c_ptr());
unsigned func_col_cnt = get_signature().functional_columns();
if(func_col_cnt==0) {
if (func_col_cnt == 0) {
return t.m_data.reserve_content_already_present();
}
else {
store_offset ofs;
if(!t.m_data.find_reserve_content(ofs)) {
if (!t.m_data.find_reserve_content(ofs)) {
return false;
}
unsigned sz = get_signature().size();
for(unsigned i=func_col_cnt; i<sz; i++) {
if(t.get_cell(ofs, i)!=f[i]) {
for (unsigned i=func_col_cnt; i<sz; i++) {
if (t.get_cell(ofs, i)!=f[i]) {
return false;
}
}
@ -546,19 +543,19 @@ namespace datalog {
bool sparse_table::fetch_fact(table_fact & f) const {
const table_signature & sig = get_signature();
SASSERT(f.size()==sig.size());
if(sig.functional_columns()==0) {
SASSERT(f.size() == sig.size());
if (sig.functional_columns() == 0) {
return contains_fact(f);
}
else {
sparse_table & t = const_cast<sparse_table &>(*this);
t.write_into_reserve(f.c_ptr());
store_offset ofs;
if(!t.m_data.find_reserve_content(ofs)) {
if (!t.m_data.find_reserve_content(ofs)) {
return false;
}
unsigned sz = sig.size();
for(unsigned i=sig.first_functional(); i<sz; i++) {
for (unsigned i=sig.first_functional(); i<sz; i++) {
f[i] = t.get_cell(ofs, i);
}
return true;
@ -571,18 +568,18 @@ namespace datalog {
*/
void sparse_table::ensure_fact(const table_fact & f) {
const table_signature & sig = get_signature();
if(sig.functional_columns()==0) {
if (sig.functional_columns() == 0) {
add_fact(f);
}
else {
write_into_reserve(f.c_ptr());
store_offset ofs;
if(!m_data.find_reserve_content(ofs)) {
if (!m_data.find_reserve_content(ofs)) {
add_fact(f);
return;
}
unsigned sz = sig.size();
for(unsigned i=sig.first_functional(); i<sz; i++) {
for (unsigned i=sig.first_functional(); i<sz; i++) {
set_cell(ofs, i, f[i]);
}
}
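
ensure_fact above is an upsert over the signature's key/functional split: if the non-functional (key) part is absent, the whole fact is added; if present, the functional columns are overwritten in place. contains_fact and fetch_fact above use the same split. A toy sketch, with column 0 as the key and the rest functional (an illustrative model, not the packed-table code):

#include <iostream>
#include <map>
#include <vector>

typedef std::vector<int> fact;

int main() {
    std::map<int, fact> table;            // key column -> functional columns
    auto ensure = [&](const fact& f) {
        table[f[0]] = fact(f.begin() + 1, f.end()); // insert or overwrite
    };
    ensure(fact{1, 10});
    ensure(fact{1, 99});                  // same key: update, not a new row
    std::cout << table.size() << " " << table[1][0] << "\n"; // prints: 1 99
}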
@ -591,7 +588,7 @@ namespace datalog {
void sparse_table::remove_fact(const table_element* f) {
//first insert the fact so that we find it's original location and remove it
write_into_reserve(f);
if(!m_data.remove_reserve_content()) {
if (!m_data.remove_reserve_content()) {
//the fact was not in the table
return;
}
@ -601,8 +598,8 @@ namespace datalog {
void sparse_table::copy_columns(const column_layout & src_layout, const column_layout & dest_layout,
unsigned start_index, unsigned after_last, const char * src, char * dest,
unsigned & dest_idx, unsigned & pre_projection_idx, const unsigned * & next_removed) {
for(unsigned i=start_index; i<after_last; i++, pre_projection_idx++) {
if(*next_removed==pre_projection_idx) {
for (unsigned i=start_index; i<after_last; i++, pre_projection_idx++) {
if (*next_removed == pre_projection_idx) {
next_removed++;
continue;
}
@ -656,18 +653,18 @@ namespace datalog {
tout << (&t1) << " " << (&t2) << " " << (&result) << "\n";
);
if(joined_col_cnt==0) {
if (joined_col_cnt == 0) {
unsigned t2idx = 0;
unsigned t2end = t2.m_data.after_last_offset();
for(; t1idx!=t1end; t1idx+=t1_entry_size) {
for(t2idx = 0; t2idx != t2end; t2idx += t2_entry_size) {
for (; t1idx!=t1end; t1idx+=t1_entry_size) {
for (t2idx = 0; t2idx != t2end; t2idx += t2_entry_size) {
result.m_data.ensure_reserve();
result.garbage_collect();
char * res_reserve = result.m_data.get_reserve_ptr();
char const* t1ptr = t1.get_at_offset(t1idx);
char const* t2ptr = t2.get_at_offset(t2idx);
if(tables_swapped) {
if (tables_swapped) {
concatenate_rows(t2.m_column_layout, t1.m_column_layout, result.m_column_layout,
t2ptr, t1ptr, res_reserve, removed_cols);
} else {
@ -687,34 +684,34 @@ namespace datalog {
bool key_modified = true;
key_indexer::query_result t2_offsets;
for(; t1idx != t1end; t1idx += t1_entry_size) {
for(unsigned i = 0; i < joined_col_cnt; i++) {
for (; t1idx != t1end; t1idx += t1_entry_size) {
for (unsigned i = 0; i < joined_col_cnt; i++) {
table_element val = t1.m_column_layout.get(t1.get_at_offset(t1idx), t1_joined_cols[i]);
TRACE("dl_table_relation", tout << "val: " << val << " " << t1idx << " " << t1_joined_cols[i] << "\n";);
if(t1_key[i] != val) {
if (t1_key[i] != val) {
t1_key[i] = val;
key_modified = true;
}
}
if(key_modified) {
if (key_modified) {
t2_offsets = t2_indexer.get_matching_offsets(t1_key);
key_modified = false;
}
if(t2_offsets.empty()) {
if (t2_offsets.empty()) {
continue;
}
key_indexer::offset_iterator t2ofs_it = t2_offsets.begin();
key_indexer::offset_iterator t2ofs_end = t2_offsets.end();
for(; t2ofs_it != t2ofs_end; ++t2ofs_it) {
for (; t2ofs_it != t2ofs_end; ++t2ofs_it) {
store_offset t2ofs = *t2ofs_it;
result.m_data.ensure_reserve();
result.garbage_collect();
char * res_reserve = result.m_data.get_reserve_ptr();
char const * t1ptr = t1.get_at_offset(t1idx);
char const * t2ptr = t2.get_at_offset(t2ofs);
if(tables_swapped) {
if (tables_swapped) {
concatenate_rows(t2.m_column_layout, t1.m_column_layout, result.m_column_layout,
t2ptr, t1ptr, res_reserve, removed_cols);
} else {
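
The join loop above is an indexed nested-loop join: scan t1, look up each row's join key in a key_indexer over t2 (reusing the previous lookup while the key is unchanged), and concatenate each matching pair into the result. A standalone sketch of that shape, using an unordered_multimap as the index over toy int-pair rows:

#include <iostream>
#include <unordered_map>
#include <utility>
#include <vector>

typedef std::pair<int, int> row;          // (join key, payload)

int main() {
    std::vector<row> t1 = {{1, 10}, {2, 20}};
    std::vector<row> t2 = {{1, 100}, {1, 101}, {3, 300}};

    // index t2 by its join column, as the key_indexer does for sparse tables
    std::unordered_multimap<int, int> idx;
    for (const row& r : t2) idx.insert(std::make_pair(r.first, r.second));

    for (const row& a : t1) {             // scan t1, probe the index
        auto range = idx.equal_range(a.first);
        for (auto it = range.first; it != range.second; ++it)
            std::cout << a.first << "," << a.second << ","
                      << it->second << "\n"; // one concatenated row per match
    }
}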
@ -743,11 +740,11 @@ namespace datalog {
void sparse_table_plugin::reset() {
table_pool::iterator it = m_pool.begin();
table_pool::iterator end = m_pool.end();
for(; it!=end; ++it) {
for (; it!=end; ++it) {
sp_table_vector * vect = it->m_value;
sp_table_vector::iterator it = vect->begin();
sp_table_vector::iterator end = vect->end();
for(; it!=end; ++it) {
for (; it!=end; ++it) {
(*it)->destroy(); //calling deallocate() would only put the table back into the pool
}
dealloc(vect);
@ -767,7 +764,7 @@ namespace datalog {
table_pool::entry * e = m_pool.insert_if_not_there2(sig, 0);
sp_table_vector * & vect = e->get_data().m_value;
if(vect==0) {
if (vect == 0) {
vect = alloc(sp_table_vector);
}
IF_VERBOSE(12, verbose_stream() << "Recycle: " << t->get_size_estimate_bytes() << "\n";);
@ -779,7 +776,7 @@ namespace datalog {
SASSERT(can_handle_signature(s));
sp_table_vector * vect;
if(!m_pool.find(s, vect) || vect->empty()) {
if (!m_pool.find(s, vect) || vect->empty()) {
return alloc(sparse_table, *this, s);
}
sparse_table * res = vect->back();
@ -796,7 +793,7 @@ namespace datalog {
bool sparse_table_plugin::join_involves_functional(const table_signature & s1, const table_signature & s2,
unsigned col_cnt, const unsigned * cols1, const unsigned * cols2) {
if(col_cnt==0) {
if (col_cnt == 0) {
return false;
}
return counter().count(col_cnt, cols1).get_max_positive()>=s1.first_functional()
@ -827,7 +824,7 @@ namespace datalog {
//do indexing into the bigger one. If we simply do a product, we want the bigger
//one to be at the outer iteration (then the small one will hopefully fit into
//the cache)
if( (t1.row_count() > t2.row_count()) == (!m_cols1.empty()) ) {
if ( (t1.row_count() > t2.row_count()) == (!m_cols1.empty()) ) {
sparse_table::self_agnostic_join_project(t2, t1, m_cols1.size(), m_cols2.c_ptr(),
m_cols1.c_ptr(), m_removed_cols.c_ptr(), true, *res);
}
@ -844,7 +841,7 @@ namespace datalog {
unsigned col_cnt, const unsigned * cols1, const unsigned * cols2) {
const table_signature & sig1 = t1.get_signature();
const table_signature & sig2 = t2.get_signature();
if(t1.get_kind()!=get_kind() || t2.get_kind()!=get_kind()
if (t1.get_kind()!=get_kind() || t2.get_kind()!=get_kind()
|| join_involves_functional(sig1, sig2, col_cnt, cols1, cols2)) {
//We also don't allow indexes on functional columns (and they are needed for joins)
return 0;
@ -857,8 +854,8 @@ namespace datalog {
const unsigned * removed_cols) {
const table_signature & sig1 = t1.get_signature();
const table_signature & sig2 = t2.get_signature();
if(t1.get_kind()!=get_kind() || t2.get_kind()!=get_kind()
|| removed_col_cnt==t1.get_signature().size()+t2.get_signature().size()
if (t1.get_kind()!=get_kind() || t2.get_kind()!=get_kind()
|| removed_col_cnt == t1.get_signature().size()+t2.get_signature().size()
|| join_involves_functional(sig1, sig2, col_cnt, cols1, cols2)) {
//We don't allow sparse tables with zero signatures (and project on all columns leads to such)
//We also don't allow indexes on functional columns.
@ -879,8 +876,8 @@ namespace datalog {
unsigned fact_size = tgt.m_fact_size;
const char* ptr = src.m_data.begin();
const char* after_last=src.m_data.after_last();
for(; ptr<after_last; ptr+=fact_size) {
if(tgt.add_fact(ptr) && delta) {
for (; ptr<after_last; ptr+=fact_size) {
if (tgt.add_fact(ptr) && delta) {
delta->add_fact(ptr);
}
}
@ -889,7 +886,7 @@ namespace datalog {
table_union_fn * sparse_table_plugin::mk_union_fn(const table_base & tgt, const table_base & src,
const table_base * delta) {
if(tgt.get_kind()!=get_kind() || src.get_kind()!=get_kind()
if (tgt.get_kind()!=get_kind() || src.get_kind()!=get_kind()
|| (delta && delta->get_kind()!=get_kind())
|| tgt.get_signature()!=src.get_signature()
|| (delta && delta->get_signature()!=tgt.get_signature())) {
@ -916,8 +913,8 @@ namespace datalog {
const sparse_table::column_layout & tgt_layout) {
unsigned r_idx=0;
unsigned tgt_i=0;
for(unsigned i=0; i<m_inp_col_cnt; i++) {
if(r_idx!=m_removed_col_cnt && i==m_removed_cols[r_idx]) {
for (unsigned i=0; i<m_inp_col_cnt; i++) {
if (r_idx!=m_removed_col_cnt && i == m_removed_cols[r_idx]) {
SASSERT(r_idx<m_removed_col_cnt);
r_idx++;
continue;
@ -925,8 +922,8 @@ namespace datalog {
tgt_layout.set(tgt, tgt_i, src_layout.get(src, i));
tgt_i++;
}
SASSERT(tgt_i==m_result_col_cnt);
SASSERT(r_idx==m_removed_col_cnt);
SASSERT(tgt_i == m_result_col_cnt);
SASSERT(r_idx == m_removed_col_cnt);
}
virtual table_base * operator()(const table_base & tb) {
@ -942,7 +939,7 @@ namespace datalog {
const char* t_ptr = t.m_data.begin();
const char* t_end = t.m_data.after_last();
for(; t_ptr!=t_end; t_ptr+=t_fact_size) {
for (; t_ptr!=t_end; t_ptr+=t_fact_size) {
SASSERT(t_ptr<t_end);
res->m_data.ensure_reserve();
char * res_ptr = res->m_data.get_reserve_ptr();
@ -955,7 +952,7 @@ namespace datalog {
table_transformer_fn * sparse_table_plugin::mk_project_fn(const table_base & t, unsigned col_cnt,
const unsigned * removed_cols) {
if(col_cnt==t.get_signature().size()) {
if (col_cnt == t.get_signature().size()) {
return 0;
}
return alloc(project_fn, t.get_signature(), col_cnt, removed_cols);
@ -984,14 +981,14 @@ namespace datalog {
sparse_table::key_indexer & indexer = t.get_key_indexer(1, &m_col);
sparse_table::key_indexer::query_result t_offsets = indexer.get_matching_offsets(m_key);
if(t_offsets.empty()) {
if (t_offsets.empty()) {
//no matches
return res;
}
sparse_table::key_indexer::offset_iterator ofs_it=t_offsets.begin();
sparse_table::key_indexer::offset_iterator ofs_end=t_offsets.end();
for(; ofs_it!=ofs_end; ++ofs_it) {
for (; ofs_it!=ofs_end; ++ofs_it) {
sparse_table::store_offset t_ofs = *ofs_it;
const char * t_ptr = t.get_at_offset(t_ofs);
@ -999,8 +996,8 @@ namespace datalog {
char * res_reserve = res->m_data.get_reserve_ptr();
unsigned res_i = 0;
for(unsigned i=0; i<t_cols; i++) {
if(i==m_col) {
for (unsigned i=0; i<t_cols; i++) {
if (i == m_col) {
continue;
}
res_layout.set(res_reserve, res_i++, t_layout.get(t_ptr, i));
@ -1013,7 +1010,7 @@ namespace datalog {
table_transformer_fn * sparse_table_plugin::mk_select_equal_and_project_fn(const table_base & t,
const table_element & value, unsigned col) {
if(t.get_kind()!=get_kind() || t.get_signature().size()==1 || col>=t.get_signature().first_functional()) {
if (t.get_kind()!=get_kind() || t.get_signature().size() == 1 || col>=t.get_signature().first_functional()) {
//We don't allow sparse tables with zero signatures (and project on a single
//column table produces one).
//We also don't allow indexes on functional columns. And our implementation of
@ -1034,11 +1031,11 @@ namespace datalog {
m_cycle_len(permutation_cycle_len), m_col_cnt(orig_sig.size()) {
SASSERT(permutation_cycle_len>=2);
idx_set cycle_cols;
for(unsigned i=0; i<m_cycle_len; i++) {
for (unsigned i=0; i<m_cycle_len; i++) {
cycle_cols.insert(permutation_cycle[i]);
}
for(unsigned i=0; i<m_col_cnt; i++) {
if(!cycle_cols.contains(i)) {
for (unsigned i=0; i<m_col_cnt; i++) {
if (!cycle_cols.contains(i)) {
m_out_of_cycle.push_back(i);
}
}
@ -1048,14 +1045,14 @@ namespace datalog {
const sparse_table::column_layout & src_layout,
const sparse_table::column_layout & tgt_layout) {
for(unsigned i=1; i<m_cycle_len; i++) {
for (unsigned i=1; i<m_cycle_len; i++) {
tgt_layout.set(tgt, m_cycle[i-1], src_layout.get(src, m_cycle[i]));
}
tgt_layout.set(tgt, m_cycle[m_cycle_len-1], src_layout.get(src, m_cycle[0]));
unsigned_vector::const_iterator it = m_out_of_cycle.begin();
unsigned_vector::const_iterator end = m_out_of_cycle.end();
for(; it!=end; ++it) {
for (; it!=end; ++it) {
unsigned col = *it;
tgt_layout.set(tgt, col, src_layout.get(src, col));
}
@ -1082,12 +1079,12 @@ namespace datalog {
const char* t_ptr = t.m_data.begin();
char* res_ptr = res->m_data.begin();
char* res_end = res_ptr+res_data_size;
for(; res_ptr!=res_end; t_ptr+=t_fact_size, res_ptr+=res_fact_size) {
for (; res_ptr!=res_end; t_ptr+=t_fact_size, res_ptr+=res_fact_size) {
transform_row(t_ptr, res_ptr, t.m_column_layout, res->m_column_layout);
}
//and insert them into the hash-map
for(unsigned i=0; i!=res_data_size; i+=res_fact_size) {
for (unsigned i=0; i!=res_data_size; i+=res_fact_size) {
TRUSTME(res->m_data.insert_offset(i));
}
@ -1097,7 +1094,7 @@ namespace datalog {
table_transformer_fn * sparse_table_plugin::mk_rename_fn(const table_base & t, unsigned permutation_cycle_len,
const unsigned * permutation_cycle) {
if(t.get_kind()!=get_kind()) {
if (t.get_kind()!=get_kind()) {
return 0;
}
return alloc(rename_fn, t.get_signature(), permutation_cycle_len, permutation_cycle);
@ -1123,9 +1120,9 @@ namespace datalog {
unsigned neg_fisrt_func = neg.get_signature().first_functional();
counter ctr;
ctr.count(m_cols2);
m_joining_neg_non_functional = ctr.get_max_counter_value()==1
&& ctr.get_positive_count()==neg_fisrt_func
&& (neg_fisrt_func==0 || ctr.get_max_positive()==neg_fisrt_func-1);
m_joining_neg_non_functional = ctr.get_max_counter_value() == 1
&& ctr.get_positive_count() == neg_fisrt_func
&& (neg_fisrt_func == 0 || ctr.get_max_positive() == neg_fisrt_func-1);
}
/**
@ -1136,7 +1133,7 @@ namespace datalog {
bool tgt_is_first, svector<store_offset> & res) {
SASSERT(res.empty());
if(!tgt_is_first) {
if (!tgt_is_first) {
m_intersection_content.reset();
}
@ -1153,32 +1150,32 @@ namespace datalog {
bool key_modified=true;
key_indexer::query_result t2_offsets;
store_offset t1_after_last = t1.m_data.after_last_offset();
for(store_offset t1_ofs=0; t1_ofs<t1_after_last; t1_ofs+=t1_entry_size) {
for (store_offset t1_ofs=0; t1_ofs<t1_after_last; t1_ofs+=t1_entry_size) {
for(unsigned i=0; i<joined_col_cnt; i++) {
for (unsigned i=0; i<joined_col_cnt; i++) {
table_element val = t1.get_cell(t1_ofs, cols1[i]);
if(t1_key[i]!=val) {
if (t1_key[i]!=val) {
t1_key[i]=val;
key_modified=true;
}
}
if(key_modified) {
if (key_modified) {
t2_offsets = t2_indexer.get_matching_offsets(t1_key);
key_modified=false;
}
if(t2_offsets.empty()) {
if (t2_offsets.empty()) {
continue;
}
if(tgt_is_first) {
if (tgt_is_first) {
res.push_back(t1_ofs);
}
else {
key_indexer::offset_iterator it = t2_offsets.begin();
key_indexer::offset_iterator end = t2_offsets.end();
for(; it!=end; ++it) {
for (; it!=end; ++it) {
store_offset ofs = *it;
if(!m_intersection_content.contains(ofs)) {
if (!m_intersection_content.contains(ofs)) {
m_intersection_content.insert(ofs);
res.push_back(ofs);
}
@ -1186,7 +1183,7 @@ namespace datalog {
}
}
if(!tgt_is_first) {
if (!tgt_is_first) {
//in this case \c res now may be in arbitrary order
std::sort(res.begin(), res.end());
}
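
collect_intersection_offsets and the negation filter above implement an anti-join: rows of tgt whose join key also occurs in neg are collected by store offset (sorted afterwards when gathered out of order) and then removed. A minimal in-place sketch of the same effect on toy keys:

#include <algorithm>
#include <iostream>
#include <unordered_set>
#include <vector>

int main() {
    std::vector<int> tgt = {1, 2, 3, 4};            // join keys of tgt's rows
    std::unordered_set<int> neg = {2, 4};           // keys present in neg

    // anti-join: keep only rows whose key has no match in neg
    tgt.erase(std::remove_if(tgt.begin(), tgt.end(),
                             [&](int k) { return neg.count(k) != 0; }),
              tgt.end());

    for (int k : tgt) std::cout << k << " ";        // prints: 1 3
    std::cout << "\n";
}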
@ -1196,8 +1193,8 @@ namespace datalog {
sparse_table & tgt = static_cast<sparse_table &>(tgt0);
const sparse_table & neg = static_cast<const sparse_table &>(neg0);
if(m_cols1.size()==0) {
if(!neg.empty()) {
if (m_cols1.size() == 0) {
if (!neg.empty()) {
tgt.reset();
}
return;
@ -1207,14 +1204,14 @@ namespace datalog {
//We don't do just the simple tgt.row_count()>neg.row_count() because the swapped case is
//more expensive. The constant 4 is, however, just my guess what the ratio might be.
if(tgt.row_count()/4>neg.row_count()) {
if (tgt.row_count()/4>neg.row_count()) {
collect_intersection_offsets(neg, tgt, false, to_remove);
}
else {
collect_intersection_offsets(tgt, neg, true, to_remove);
}
if(to_remove.empty()) {
if (to_remove.empty()) {
return;
}
@ -1232,7 +1229,7 @@ namespace datalog {
table_intersection_filter_fn * sparse_table_plugin::mk_filter_by_negation_fn(const table_base & t,
const table_base & negated_obj, unsigned joined_col_cnt,
const unsigned * t_cols, const unsigned * negated_cols) {
if(!check_kind(t) || !check_kind(negated_obj)
if (!check_kind(t) || !check_kind(negated_obj)
|| join_involves_functional(t.get_signature(), negated_obj.get_signature(), joined_col_cnt,
t_cols, negated_cols) ) {
return 0;

View file

@ -107,15 +107,16 @@ namespace datalog {
scoped_query scoped_query(m_context);
m_code.reset();
instruction_block termination_code;
m_ectx.reset();
lbool result;
TRACE("dl", m_context.display(tout););
while (true) {
m_ectx.reset();
m_code.reset();
termination_code.reset();
m_context.ensure_closed();
m_context.transform_rules();
if (m_context.canceled()) {
@ -174,8 +175,6 @@ namespace datalog {
else {
restart_time = static_cast<unsigned>(new_restart_time);
}
termination_code.reset();
scoped_query.reset();
}
m_context.record_transformed_rules();
@ -452,6 +451,11 @@ namespace datalog {
}
}
bool rel_context::has_facts(func_decl * pred) const {
relation_base* r = try_get_relation(pred);
return r && !r->empty();
}
void rel_context::store_relation(func_decl * pred, relation_base * rel) {
get_rmanager().store_relation(pred, rel);
}

View file

@ -87,9 +87,14 @@ namespace datalog {
*/
bool result_contains_fact(relation_fact const& f);
/** \brief add facts to relation
*/
void add_fact(func_decl* pred, relation_fact const& fact);
void add_fact(func_decl* pred, table_fact const& fact);
/** \brief check if facts were added to relation
*/
bool has_facts(func_decl * pred) const;
/**
\brief Store the relation \c rel under the predicate \c pred. The \c context object

View file

@ -29,6 +29,7 @@ Revision History:
#include"th_rewriter.h"
#include"filter_model_converter.h"
#include"ast_smt2_pp.h"
#include"expr_replacer.h"
/*
----
@ -131,18 +132,16 @@ struct purify_arith_proc {
proof_ref_vector m_new_cnstr_prs;
expr_ref m_subst;
proof_ref m_subst_pr;
bool m_in_q;
unsigned m_var_idx;
expr_ref_vector m_new_vars;
rw_cfg(purify_arith_proc & o, bool in_q):
rw_cfg(purify_arith_proc & o):
m_owner(o),
m_pinned(o.m()),
m_new_cnstrs(o.m()),
m_new_cnstr_prs(o.m()),
m_subst(o.m()),
m_subst_pr(o.m()),
m_in_q(in_q),
m_var_idx(0) {
m_new_vars(o.m()) {
}
ast_manager & m() { return m_owner.m(); }
@ -155,14 +154,9 @@ struct purify_arith_proc {
bool elim_inverses() const { return m_owner.m_elim_inverses; }
expr * mk_fresh_var(bool is_int) {
if (m_in_q) {
unsigned idx = m_var_idx;
m_var_idx++;
return m().mk_var(idx, is_int ? u().mk_int() : u().mk_real());
}
else {
return m().mk_fresh_const(0, is_int ? u().mk_int() : u().mk_real());
}
expr * r = m().mk_fresh_const(0, is_int ? u().mk_int() : u().mk_real());
m_new_vars.push_back(r);
return r;
}
expr * mk_fresh_real_var() { return mk_fresh_var(false); }
@ -596,105 +590,51 @@ struct purify_arith_proc {
struct rw : public rewriter_tpl<rw_cfg> {
rw_cfg m_cfg;
rw(purify_arith_proc & o, bool in_q):
rw(purify_arith_proc & o):
rewriter_tpl<rw_cfg>(o.m(), o.m_produce_proofs, m_cfg),
m_cfg(o, in_q) {
}
};
/**
\brief Return the number of (auxiliary) variables needed for converting an expression.
*/
struct num_vars_proc {
arith_util & m_util;
expr_fast_mark1 m_visited;
ptr_vector<expr> m_todo;
unsigned m_num_vars;
bool m_elim_root_objs;
num_vars_proc(arith_util & u, bool elim_root_objs):
m_util(u),
m_elim_root_objs(elim_root_objs) {
}
void visit(expr * t) {
if (m_visited.is_marked(t))
return;
m_visited.mark(t);
m_todo.push_back(t);
}
void process(app * t) {
if (t->get_family_id() == m_util.get_family_id()) {
if (m_util.is_power(t)) {
rational k;
if (m_util.is_numeral(t->get_arg(1), k) && (k.is_zero() || !k.is_int())) {
m_num_vars++;
}
}
else if (m_util.is_div(t) ||
m_util.is_idiv(t) ||
m_util.is_mod(t) ||
m_util.is_to_int(t) ||
(m_util.is_irrational_algebraic_numeral(t) && m_elim_root_objs)) {
m_num_vars++;
}
}
unsigned num_args = t->get_num_args();
for (unsigned i = 0; i < num_args; i++)
visit(t->get_arg(i));
}
unsigned operator()(expr * t) {
m_num_vars = 0;
visit(t);
while (!m_todo.empty()) {
expr * t = m_todo.back();
m_todo.pop_back();
if (is_app(t))
process(to_app(t));
}
m_visited.reset();
return m_num_vars;
m_cfg(o) {
}
};
void process_quantifier(quantifier * q, expr_ref & result, proof_ref & result_pr) {
result_pr = 0;
num_vars_proc p(u(), m_elim_root_objs);
expr_ref body(m());
unsigned num_vars = p(q->get_expr());
if (num_vars > 0) {
// open space for aux vars
var_shifter shifter(m());
shifter(q->get_expr(), num_vars, body);
}
else {
body = q->get_expr();
}
rw r(*this, true);
rw r(*this);
expr_ref new_body(m());
proof_ref new_body_pr(m());
r(body, new_body, new_body_pr);
r(q->get_expr(), new_body, new_body_pr);
unsigned num_vars = r.cfg().m_new_vars.size();
TRACE("purify_arith",
tout << "num_vars: " << num_vars << "\n";
tout << "body: " << mk_ismt2_pp(body, m()) << "\nnew_body: " << mk_ismt2_pp(new_body, m()) << "\n";);
tout << "body: " << mk_ismt2_pp(q->get_expr(), m()) << "\nnew_body: " << mk_ismt2_pp(new_body, m()) << "\n";);
if (num_vars == 0) {
SASSERT(r.cfg().m_new_cnstrs.empty());
result = m().update_quantifier(q, new_body);
if (m_produce_proofs)
result_pr = m().mk_quant_intro(q, to_quantifier(result.get()), result_pr);
}
else {
// Add new constraints
expr_ref_vector & cnstrs = r.cfg().m_new_cnstrs;
cnstrs.push_back(new_body);
new_body = m().mk_and(cnstrs.size(), cnstrs.c_ptr());
// Open space for new variables
var_shifter shifter(m());
shifter(new_body, num_vars, new_body);
// Rename fresh constants in r.cfg().m_new_vars to variables
ptr_buffer<sort> sorts;
buffer<symbol> names;
expr_substitution subst(m(), false, false);
for (unsigned i = 0; i < num_vars; i++) {
sorts.push_back(u().mk_real());
expr * c = r.cfg().m_new_vars.get(i);
sort * s = get_sort(c);
sorts.push_back(s);
names.push_back(m().mk_fresh_var_name("x"));
unsigned idx = num_vars - i - 1;
subst.insert(c, m().mk_var(idx, s));
}
scoped_ptr<expr_replacer> replacer = mk_default_expr_replacer(m());
replacer->set_substitution(&subst);
(*replacer)(new_body, new_body);
new_body = m().mk_exists(num_vars, sorts.c_ptr(), names.c_ptr(), new_body);
result = m().update_quantifier(q, new_body);
if (m_produce_proofs) {
@ -708,7 +648,7 @@ struct purify_arith_proc {
}
void operator()(goal & g, model_converter_ref & mc, bool produce_models) {
rw r(*this, false);
rw r(*this);
// purify
expr_ref new_curr(m());
proof_ref new_pr(m());
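
The rewritten quantifier path above no longer pre-counts auxiliary variables (the deleted num_vars_proc) and no longer shifts de Bruijn indices up front; it introduces fresh constants while rewriting, then renames them to bound variables and wraps the body in an inner exists. The purification step itself replaces a partial arithmetic operation by a fresh symbol plus a defining side constraint. A minimal sketch of that idea on SMT-LIB-style strings (purifier and its naming scheme are hypothetical, not the z3 rewriter):

#include <iostream>
#include <string>
#include <vector>

struct purifier {
    std::vector<std::string> constraints; // defining constraints, emitted aside
    int fresh = 0;
    std::string purify_div(const std::string& num, const std::string& den) {
        std::string k = "k!" + std::to_string(fresh++); // fresh symbol
        // guarded definition: den != 0 implies k * den == num
        constraints.push_back("(=> (not (= " + den + " 0)) (= (* " + k +
                              " " + den + ") " + num + "))");
        return k;
    }
};

int main() {
    purifier p;
    std::string body = "(> " + p.purify_div("x", "y") + " 1)"; // (> k!0 1)
    std::cout << body << "\n";
    // caller conjoins these and binds k!0 with an inner exists, as in the diff
    for (const std::string& c : p.constraints) std::cout << c << "\n";
}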

View file

@ -166,7 +166,7 @@ static void tst3() {
void tst_diff_logic() {
tst1();
tst2();
tst3();
// tst3();
}
#else
void tst_diff_logic() {

View file

@ -48,6 +48,7 @@ void dl_query_test(ast_manager & m, smt_params & fparams, params_ref& params,
bool use_magic_sets) {
dl_decl_util decl_util(m);
random_gen ran(0);
context ctx_q(m, fparams);
params.set_bool("magic_sets_for_queries", use_magic_sets);
@ -86,7 +87,7 @@ void dl_query_test(ast_manager & m, smt_params & fparams, params_ref& params,
warning_msg("cannot get sort size");
return;
}
uint64 num = rand()%sort_sz;
uint64 num = ran()%sort_sz;
app * el_b = decl_util.mk_numeral(num, sig_b[col]);
f_b.push_back(el_b);
app * el_q = decl_util.mk_numeral(num, sig_q[col]);
@ -112,7 +113,7 @@ void dl_query_test(ast_manager & m, smt_params & fparams, params_ref& params,
table_base::iterator fit = table_b.begin();
table_base::iterator fend = table_b.end();
for(; fit!=fend; ++fit) {
if(rand()%std::max(1u,table_sz/test_count)!=0) {
if(ran()%std::max(1u,table_sz/test_count)!=0) {
continue;
}
fit->get_fact(tf);
@ -131,6 +132,7 @@ void dl_query_test_wpa(smt_params & fparams, params_ref& params) {
arith_util arith(m);
const char * problem_dir = "C:\\tvm\\src\\z3_2\\debug\\test\\w0.datalog";
dl_decl_util dl_util(m);
random_gen ran(0);
std::cerr << "Testing queries on " << problem_dir <<"\n";
context ctx(m, fparams);
@ -151,8 +153,8 @@ void dl_query_test_wpa(smt_params & fparams, params_ref& params) {
TRUSTME( ctx.try_get_sort_constant_count(var_sort, var_sz) );
for(unsigned attempt=0; attempt<attempts; attempt++) {
unsigned el1 = rand()%var_sz;
unsigned el2 = rand()%var_sz;
unsigned el1 = ran()%var_sz;
unsigned el2 = ran()%var_sz;
expr_ref_vector q_args(m);
q_args.push_back(dl_util.mk_numeral(el1, var_sort));
@ -217,6 +219,9 @@ void tst_dl_query() {
params.set_uint("similarity_compressor", use_similar != 0);
for(unsigned use_magic_sets=0; use_magic_sets<=1; use_magic_sets++) {
if (!(use_restarts == 1 && use_similar == 0 && use_magic_sets == 1)) {
continue;
}
stopwatch watch;
watch.start();
std::cerr << "------- " << (use_restarts ? "With" : "Without") << " restarts -------\n";
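
The test changes above swap the global rand() for a locally constructed random_gen ran(0), so every run draws the same sequence regardless of what else touches the global RNG state. A sketch of the same design choice with the standard library (std::mt19937 standing in for z3's random_gen):

#include <iostream>
#include <random>

int main() {
    std::mt19937 ran(0);          // fixed seed: identical sequence every run
    for (int i = 0; i < 3; ++i)
        std::cout << ran() % 100 << " ";
    std::cout << "\n";
}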

View file

@ -171,7 +171,7 @@ static void tst2() {
rational int64_max("9223372036854775807");
rational int64_min(-int64_max - rational(1));
rational int64_min((-int64_max) - rational(1));
// is_int64
SASSERT(int64_max.is_int64());
SASSERT(int64_min.is_int64());

View file

@ -120,6 +120,7 @@ mpz_manager<SYNCH>::mpz_manager():
mpz_set_ui(m_tmp, max_l);
mpz_add(m_uint64_max, m_uint64_max, m_tmp);
mpz_init(m_int64_max);
mpz_init(m_int64_min);
max_l = static_cast<unsigned>(INT64_MAX % static_cast<int64>(UINT_MAX));
max_h = static_cast<unsigned>(INT64_MAX / static_cast<int64>(UINT_MAX));
@ -128,6 +129,8 @@ mpz_manager<SYNCH>::mpz_manager():
mpz_mul(m_int64_max, m_tmp, m_int64_max);
mpz_set_ui(m_tmp, max_l);
mpz_add(m_int64_max, m_tmp, m_int64_max);
mpz_neg(m_int64_min, m_int64_max);
mpz_sub_ui(m_int64_min, m_int64_min, 1);
#endif
mpz one(1);
@ -152,6 +155,7 @@ mpz_manager<SYNCH>::~mpz_manager() {
deallocate(m_arg[1]);
mpz_clear(m_uint64_max);
mpz_clear(m_int64_max);
mpz_clear(m_int64_min);
#endif
if (SYNCH)
omp_destroy_nest_lock(&m_lock);
@ -1299,10 +1303,9 @@ bool mpz_manager<SYNCH>::is_int64(mpz const & a) const {
if (is_small(a))
return true;
#ifndef _MP_GMP
if (!is_uint64(a)) {
if (!is_abs_uint64(a))
return false;
}
uint64 num = get_uint64(a);
uint64 num = big_abs_to_uint64(a);
uint64 msb = static_cast<uint64>(1) << 63;
uint64 msb_val = msb & num;
if (a.m_val >= 0) {
@ -1318,7 +1321,7 @@ bool mpz_manager<SYNCH>::is_int64(mpz const & a) const {
}
#else
// GMP version
return mpz_cmp(*a.m_ptr, m_int64_max) <= 0;
return mpz_cmp(m_int64_min, *a.m_ptr) <= 0 && mpz_cmp(*a.m_ptr, m_int64_max) <= 0;
#endif
}
@ -1328,14 +1331,7 @@ uint64 mpz_manager<SYNCH>::get_uint64(mpz const & a) const {
return static_cast<uint64>(a.m_val);
#ifndef _MP_GMP
SASSERT(a.m_ptr->m_size > 0);
if (a.m_ptr->m_size == 1)
return digits(a)[0];
if (sizeof(digit_t) == sizeof(uint64))
// 64-bit machine
return digits(a)[0];
else
// 32-bit machine
return ((static_cast<uint64>(digits(a)[1]) << 32) | (static_cast<uint64>(digits(a)[0])));
return big_abs_to_uint64(a);
#else
// GMP version
if (sizeof(uint64) == sizeof(unsigned long)) {
@ -1360,7 +1356,7 @@ int64 mpz_manager<SYNCH>::get_int64(mpz const & a) const {
return static_cast<int64>(a.m_val);
#ifndef _MP_GMP
SASSERT(is_int64(a));
uint64 num = get_uint64(a);
uint64 num = big_abs_to_uint64(a);
if (a.m_val < 0) {
if (num != 0 && (num << 1) == 0)
return INT64_MIN;
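
The corrected is_int64 above checks the magnitude against the sign: a negative value may have magnitude up to 2^63 (INT64_MIN), a non-negative one only up to 2^63 - 1, and the GMP path now also compares against the new m_int64_min lower bound. A sketch of that sign-magnitude range check:

#include <cstdint>
#include <iostream>

// A sign-magnitude value fits in int64 iff the magnitude is at most 2^63
// for negative values and strictly below 2^63 for non-negative ones.
static bool fits_int64(bool negative, uint64_t magnitude) {
    const uint64_t msb = uint64_t(1) << 63;
    return negative ? magnitude <= msb : magnitude < msb;
}

int main() {
    std::cout << fits_int64(true,  uint64_t(1) << 63) << "\n"; // 1: INT64_MIN
    std::cout << fits_int64(false, uint64_t(1) << 63) << "\n"; // 0: overflow
}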

View file

@ -168,6 +168,7 @@ class mpz_manager {
mpz_t * m_arg[2];
mpz_t m_uint64_max;
mpz_t m_int64_max;
mpz_t m_int64_min;
mpz_t * allocate() {
mpz_t * cell = reinterpret_cast<mpz_t*>(m_allocator.allocate(sizeof(mpz_t)));
@ -211,6 +212,30 @@ class mpz_manager {
static digit_t * digits(mpz const & c) { return c.m_ptr->m_digits; }
// Return true if the absolute value fits in a UINT64
static bool is_abs_uint64(mpz const & a) {
if (is_small(a))
return true;
if (sizeof(digit_t) == sizeof(uint64))
return size(a) <= 1;
else
return size(a) <= 2;
}
// CAST the absolute value into a UINT64
static uint64 big_abs_to_uint64(mpz const & a) {
SASSERT(is_abs_uint64(a));
SASSERT(!is_small(a));
if (a.m_ptr->m_size == 1)
return digits(a)[0];
if (sizeof(digit_t) == sizeof(uint64))
// 64-bit machine
return digits(a)[0];
else
// 32-bit machine
return ((static_cast<uint64>(digits(a)[1]) << 32) | (static_cast<uint64>(digits(a)[0])));
}
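
big_abs_to_uint64 above composes the result from machine digits: a single digit on 64-bit builds, and (hi << 32) | lo on 32-bit builds, where digits are stored low word first. A standalone check of that composition:

#include <cassert>
#include <cstdint>
#include <iostream>

int main() {
    uint32_t digits[2] = {0x89abcdefu, 0x01234567u}; // low word, high word
    uint64_t v = (uint64_t(digits[1]) << 32) | digits[0];
    assert(v == 0x0123456789abcdefull);
    std::cout << std::hex << v << "\n";
}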
template<int IDX>
void get_sign_cell(mpz const & a, int & sign, mpz_cell * & cell) {
if (is_small(a)) {

View file

@ -45,7 +45,7 @@ COMPILE_TIME_ASSERT(sizeof(int64) == 8);
#define INT64_MIN static_cast<int64>(0x8000000000000000ull)
#endif
#ifndef INT64_MAX
#define INT64_MAX static_cast<int64>(0x0fffffffffffffffull)
#define INT64_MAX static_cast<int64>(0x7fffffffffffffffull)
#endif
#ifndef UINT64_MAX
#define UINT64_MAX 0xffffffffffffffffull
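
The fix above is substantive despite its size: the old fallback defined INT64_MAX as 0x0fffffffffffffff, i.e. 2^60 - 1, not the true maximum 2^63 - 1, and the mpz_manager constructor earlier in this commit builds m_int64_max from this constant. A compile-time guard sketch against that class of mistake:

#include <cstdint>

// The fallback definition must agree with the true int64 maximum.
static_assert(static_cast<int64_t>(0x7fffffffffffffffull) == INT64_MAX,
              "fallback INT64_MAX must match the true int64 maximum");

int main() { return 0; }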