diff --git a/src/api/api_context.cpp b/src/api/api_context.cpp index 05e1ca675..b10621d43 100644 --- a/src/api/api_context.cpp +++ b/src/api/api_context.cpp @@ -128,6 +128,7 @@ namespace api { for (unsigned i = 0; i < m_replay_stack.size(); ++i) { dealloc(m_replay_stack[i]); } + m_ast_trail.reset(); } reset_parser(); dealloc(m_solver); @@ -342,24 +343,21 @@ namespace api { void context::push() { get_smt_kernel().push(); - if (!m_user_ref_count) { - m_ast_lim.push_back(m_ast_trail.size()); - m_replay_stack.push_back(0); - } + m_ast_lim.push_back(m_ast_trail.size()); + m_replay_stack.push_back(0); } void context::pop(unsigned num_scopes) { for (unsigned i = 0; i < num_scopes; ++i) { - if (!m_user_ref_count) { - unsigned sz = m_ast_lim.back(); - m_ast_lim.pop_back(); - dealloc(m_replay_stack.back()); - m_replay_stack.pop_back(); - while (m_ast_trail.size() > sz) { - m_ast_trail.pop_back(); - } + unsigned sz = m_ast_lim.back(); + m_ast_lim.pop_back(); + dealloc(m_replay_stack.back()); + m_replay_stack.pop_back(); + while (m_ast_trail.size() > sz) { + m_ast_trail.pop_back(); } } + SASSERT(num_scopes <= get_smt_kernel().get_scope_level()); get_smt_kernel().pop(num_scopes); } diff --git a/src/api/api_interp.cpp b/src/api/api_interp.cpp index 03d5ff843..e06ad5192 100644 --- a/src/api/api_interp.cpp +++ b/src/api/api_interp.cpp @@ -411,7 +411,7 @@ static void get_file_params(const char *filename, hash_map= 0 && eqpos < tok.size()){ + if(eqpos != std::string::npos){ std::string left = tok.substr(0,eqpos); std::string right = tok.substr(eqpos+1,tok.size()-eqpos-1); params[left] = right; diff --git a/src/api/api_solver_old.cpp b/src/api/api_solver_old.cpp index e0533fbd9..b81fb2f2c 100644 --- a/src/api/api_solver_old.cpp +++ b/src/api/api_solver_old.cpp @@ -40,7 +40,7 @@ extern "C" { LOG_Z3_pop(c, num_scopes); RESET_ERROR_CODE(); CHECK_SEARCHING(c); - if (num_scopes > mk_c(c)->get_smt_kernel().get_scope_level()) { + if (num_scopes > mk_c(c)->get_num_scopes()) { SET_ERROR_CODE(Z3_IOB); return; } diff --git a/src/api/python/z3.py b/src/api/python/z3.py index 50d29a790..879b19597 100644 --- a/src/api/python/z3.py +++ b/src/api/python/z3.py @@ -6960,7 +6960,7 @@ def substitute(t, *m): if isinstance(m, tuple): m1 = _get_args(m) if isinstance(m1, list): - m = _get_args(m1) + m = m1 if __debug__: _z3_assert(is_expr(t), "Z3 expression expected") _z3_assert(all([isinstance(p, tuple) and is_expr(p[0]) and is_expr(p[1]) and p[0].sort().eq(p[1].sort()) for p in m]), "Z3 invalid substitution, expression pairs expected.") @@ -7344,7 +7344,7 @@ def binary_interpolant(a,b,p=None,ctx=None): >>> x = Int('x') >>> print binary_interpolant(x<0,x>2) - x <= 2 + Not(x >= 0) """ f = And(Interp(a),b) return tree_interpolant(f,p,ctx)[0] diff --git a/src/api/z3_api.h b/src/api/z3_api.h index 4ffdfa9c2..e1c42667a 100644 --- a/src/api/z3_api.h +++ b/src/api/z3_api.h @@ -7069,7 +7069,7 @@ END_MLAPI_EXCLUDE \mlonly then a valid model is returned. Otherwise, it is unsafe to use the returned model.\endmlonly \conly The caller is responsible for deleting the model using the function #Z3_del_model. - \conly \remark In constrast with the rest of the Z3 API, the reference counter of the + \conly \remark In contrast with the rest of the Z3 API, the reference counter of the \conly model is incremented. This is to guarantee backward compatibility. In previous \conly versions, models did not support reference counting. @@ -7182,6 +7182,11 @@ END_MLAPI_EXCLUDE \brief Delete a model object. 
\sa Z3_check_and_get_model + + \conly \remark The Z3_check_and_get_model automatically increments a reference count on the model. + \conly The expected usage is that models created by that method are deleted using Z3_del_model. + \conly This is for backwards compatibility and in contrast to the rest of the API where + \conly callers are responsible for managing reference counts. \deprecated Subsumed by Z3_solver API diff --git a/src/muz/base/dl_context.cpp b/src/muz/base/dl_context.cpp index fbef46ae3..3d0c7c880 100644 --- a/src/muz/base/dl_context.cpp +++ b/src/muz/base/dl_context.cpp @@ -1104,10 +1104,10 @@ namespace datalog { void context::get_raw_rule_formulas(expr_ref_vector& rules, svector& names){ for (unsigned i = 0; i < m_rule_fmls.size(); ++i) { - expr_ref r = bind_variables(m_rule_fmls[i].get(), true); - rules.push_back(r.get()); - // rules.push_back(m_rule_fmls[i].get()); - names.push_back(m_rule_names[i]); + expr_ref r = bind_variables(m_rule_fmls[i].get(), true); + rules.push_back(r.get()); + // rules.push_back(m_rule_fmls[i].get()); + names.push_back(m_rule_names[i]); } } diff --git a/src/muz/rel/dl_compiler.cpp b/src/muz/rel/dl_compiler.cpp index 276e7b836..2a2507d7f 100644 --- a/src/muz/rel/dl_compiler.cpp +++ b/src/muz/rel/dl_compiler.cpp @@ -589,49 +589,8 @@ namespace datalog { dealloc = true; } - //enforce negative predicates - unsigned ut_len=r->get_uninterpreted_tail_size(); - for(unsigned i=pt_len; iget_tail(i); - func_decl * neg_pred = neg_tail->get_decl(); - variable_intersection neg_intersection(m_context.get_manager()); - neg_intersection.populate(single_res_expr, neg_tail); - unsigned_vector t_cols(neg_intersection.size(), neg_intersection.get_cols1()); - unsigned_vector neg_cols(neg_intersection.size(), neg_intersection.get_cols2()); - - unsigned neg_len = neg_tail->get_num_args(); - for(unsigned i=0; iget_arg(i); - if(is_var(e)) { - continue; - } - SASSERT(is_app(e)); - relation_sort arg_sort; - m_context.get_rel_context()->get_rmanager().from_predicate(neg_pred, i, arg_sort); - reg_idx new_reg; - bool new_dealloc; - make_add_constant_column(head_pred, filtered_res, arg_sort, to_app(e), new_reg, new_dealloc, acc); - - if (dealloc) - make_dealloc_non_void(filtered_res, acc); - dealloc = new_dealloc; - filtered_res = new_reg; // here filtered_res value gets changed !! 
- - t_cols.push_back(single_res_expr.size()); - neg_cols.push_back(i); - single_res_expr.push_back(e); - } - SASSERT(t_cols.size()==neg_cols.size()); - - reg_idx neg_reg = m_pred_regs.find(neg_pred); - if (!dealloc) - make_clone(filtered_res, filtered_res, acc); - acc.push_back(instruction::mk_filter_by_negation(filtered_res, neg_reg, t_cols.size(), - t_cols.c_ptr(), neg_cols.c_ptr())); - dealloc = true; - } - // enforce interpreted tail predicates + unsigned ut_len = r->get_uninterpreted_tail_size(); unsigned ft_len = r->get_tail_size(); // full tail ptr_vector tail; for (unsigned tail_index = ut_len; tail_index < ft_len; ++tail_index) { @@ -738,6 +697,47 @@ namespace datalog { dealloc = true; } + //enforce negative predicates + for (unsigned i = pt_len; iget_tail(i); + func_decl * neg_pred = neg_tail->get_decl(); + variable_intersection neg_intersection(m_context.get_manager()); + neg_intersection.populate(single_res_expr, neg_tail); + unsigned_vector t_cols(neg_intersection.size(), neg_intersection.get_cols1()); + unsigned_vector neg_cols(neg_intersection.size(), neg_intersection.get_cols2()); + + unsigned neg_len = neg_tail->get_num_args(); + for (unsigned i = 0; iget_arg(i); + if (is_var(e)) { + continue; + } + SASSERT(is_app(e)); + relation_sort arg_sort; + m_context.get_rel_context()->get_rmanager().from_predicate(neg_pred, i, arg_sort); + reg_idx new_reg; + bool new_dealloc; + make_add_constant_column(head_pred, filtered_res, arg_sort, to_app(e), new_reg, new_dealloc, acc); + + if (dealloc) + make_dealloc_non_void(filtered_res, acc); + dealloc = new_dealloc; + filtered_res = new_reg; // here filtered_res value gets changed !! + + t_cols.push_back(single_res_expr.size()); + neg_cols.push_back(i); + single_res_expr.push_back(e); + } + SASSERT(t_cols.size() == neg_cols.size()); + + reg_idx neg_reg = m_pred_regs.find(neg_pred); + if (!dealloc) + make_clone(filtered_res, filtered_res, acc); + acc.push_back(instruction::mk_filter_by_negation(filtered_res, neg_reg, t_cols.size(), + t_cols.c_ptr(), neg_cols.c_ptr())); + dealloc = true; + } + #if 0 // this version is potentially better for non-symbolic tables, // since it constraints each unbound column at a time (reducing the diff --git a/src/muz/rel/dl_vector_relation.h b/src/muz/rel/dl_vector_relation.h index 114f4ca43..4d0917359 100644 --- a/src/muz/rel/dl_vector_relation.h +++ b/src/muz/rel/dl_vector_relation.h @@ -62,8 +62,6 @@ namespace datalog { dealloc(m_elems); } - virtual bool can_swap() const { return true; } - virtual void swap(relation_base& other) { vector_relation& o = dynamic_cast(other); if (&o == this) return; diff --git a/src/smt/smt_model_checker.cpp b/src/smt/smt_model_checker.cpp index 526447f9f..7053d63ec 100644 --- a/src/smt/smt_model_checker.cpp +++ b/src/smt/smt_model_checker.cpp @@ -136,9 +136,8 @@ namespace smt { if (cex == 0) return false; // no model available. unsigned num_decls = q->get_num_decls(); - unsigned num_sks = sks.size(); // Remark: sks were created for the flat version of q. 
- SASSERT(num_sks >= num_decls); + SASSERT(sks.size() >= num_decls); expr_ref_buffer bindings(m_manager); bindings.resize(num_decls); unsigned max_generation = 0; diff --git a/src/smt/theory_arith.h b/src/smt/theory_arith.h index 998dd72e6..d1af762bb 100644 --- a/src/smt/theory_arith.h +++ b/src/smt/theory_arith.h @@ -410,6 +410,7 @@ namespace smt { atoms m_atoms; // set of theory atoms ptr_vector m_asserted_bounds; // set of asserted bounds unsigned m_asserted_qhead; + ptr_vector m_new_atoms; // new bound atoms that have yet to be internalized. svector m_nl_monomials; // non linear monomials svector m_nl_propagated; // non linear monomials that became linear v_dependency_manager m_dep_manager; // for tracking bounds during non-linear reasoning @@ -570,6 +571,22 @@ namespace smt { void mk_clause(literal l1, literal l2, unsigned num_params, parameter * params); void mk_clause(literal l1, literal l2, literal l3, unsigned num_params, parameter * params); void mk_bound_axioms(atom * a); + void mk_bound_axiom(atom* a1, atom* a2); + void flush_bound_axioms(); + typename atoms::iterator next_sup(atom* a1, atom_kind kind, + typename atoms::iterator it, + typename atoms::iterator end, + bool& found_compatible); + typename atoms::iterator next_inf(atom* a1, atom_kind kind, + typename atoms::iterator it, + typename atoms::iterator end, + bool& found_compatible); + typename atoms::iterator first(atom_kind kind, + typename atoms::iterator it, + typename atoms::iterator end); + struct compare_atoms { + bool operator()(atom* a1, atom* a2) const { return a1->get_k() < a2->get_k(); } + }; virtual bool default_internalizer() const { return false; } virtual bool internalize_atom(app * n, bool gate_ctx); virtual bool internalize_term(app * term); diff --git a/src/smt/theory_arith_core.h b/src/smt/theory_arith_core.h index fbc043048..c743562da 100644 --- a/src/smt/theory_arith_core.h +++ b/src/smt/theory_arith_core.h @@ -348,13 +348,24 @@ namespace smt { context & ctx = get_context(); simplifier & s = ctx.get_simplifier(); expr_ref s_ante(m), s_conseq(m); + expr* s_conseq_n, * s_ante_n; + bool negated; proof_ref pr(m); + s(ante, s_ante, pr); + negated = m.is_not(s_ante, s_ante_n); + if (negated) s_ante = s_ante_n; ctx.internalize(s_ante, false); literal l_ante = ctx.get_literal(s_ante); + if (negated) l_ante.neg(); + s(conseq, s_conseq, pr); + negated = m.is_not(s_conseq, s_conseq_n); + if (negated) s_conseq = s_conseq_n; ctx.internalize(s_conseq, false); literal l_conseq = ctx.get_literal(s_conseq); + if (negated) l_conseq.neg(); + literal lits[2] = {l_ante, l_conseq}; ctx.mk_th_axiom(get_id(), 2, lits); if (ctx.relevancy()) { @@ -800,48 +811,238 @@ namespace smt { template void theory_arith::mk_bound_axioms(atom * a1) { theory_var v = a1->get_var(); - literal l1(a1->get_bool_var()); + atoms & occs = m_var_occs[v]; + if (!get_context().is_searching()) { + // + // NB. We make an assumption that user push calls propagation + // before internal scopes are pushed. This flushes all newly + // asserted atoms into the right context. 
+ // + m_new_atoms.push_back(a1); + return; + } numeral const & k1(a1->get_k()); atom_kind kind1 = a1->get_atom_kind(); TRACE("mk_bound_axioms", tout << "making bound axioms for v" << v << " " << kind1 << " " << k1 << "\n";); - atoms & occs = m_var_occs[v]; typename atoms::iterator it = occs.begin(); typename atoms::iterator end = occs.end(); + + typename atoms::iterator lo_inf = end, lo_sup = end; + typename atoms::iterator hi_inf = end, hi_sup = end; for (; it != end; ++it) { - atom * a2 = *it; - literal l2(a2->get_bool_var()); - numeral const & k2 = a2->get_k(); - atom_kind kind2 = a2->get_atom_kind(); + atom * a2 = *it; + numeral const & k2(a2->get_k()); + atom_kind kind2 = a2->get_atom_kind(); SASSERT(k1 != k2 || kind1 != kind2); - SASSERT(a2->get_var() == v); - parameter coeffs[3] = { parameter(symbol("farkas")), parameter(rational(1)), parameter(rational(1)) }; - if (kind1 == A_LOWER) { - if (kind2 == A_LOWER) { - // x >= k1, x >= k2 - if (k1 >= k2) mk_clause(~l1, l2, 3, coeffs); - else mk_clause(~l2, l1, 3, coeffs); + if (kind2 == A_LOWER) { + if (k2 < k1) { + if (lo_inf == end || k2 > (*lo_inf)->get_k()) { + lo_inf = it; + } } - else { - // x >= k1, x <= k2 - if (k1 > k2) mk_clause(~l1, ~l2, 3, coeffs); - else mk_clause(l1, l2, 3, coeffs); + else if (lo_sup == end || k2 < (*lo_sup)->get_k()) { + lo_sup = it; } } - else { - if (kind2 == A_LOWER) { - // x <= k1, x >= k2 - if (k1 < k2) mk_clause(~l1, ~l2, 3, coeffs); - else mk_clause(l1, l2, 3, coeffs); + else if (k2 < k1) { + if (hi_inf == end || k2 > (*hi_inf)->get_k()) { + hi_inf = it; + } + } + else if (hi_sup == end || k2 < (*hi_sup)->get_k()) { + hi_sup = it; + } + } + if (lo_inf != end) mk_bound_axiom(a1, *lo_inf); + if (lo_sup != end) mk_bound_axiom(a1, *lo_sup); + if (hi_inf != end) mk_bound_axiom(a1, *hi_inf); + if (hi_sup != end) mk_bound_axiom(a1, *hi_sup); + } + + template + void theory_arith::mk_bound_axiom(atom* a1, atom* a2) { + theory_var v = a1->get_var(); + literal l1(a1->get_bool_var()); + literal l2(a2->get_bool_var()); + numeral const & k1(a1->get_k()); + numeral const & k2(a2->get_k()); + atom_kind kind1 = a1->get_atom_kind(); + atom_kind kind2 = a2->get_atom_kind(); + bool v_is_int = is_int(v); + SASSERT(v == a2->get_var()); + SASSERT(k1 != k2 || kind1 != kind2); + parameter coeffs[3] = { parameter(symbol("farkas")), + parameter(rational(1)), parameter(rational(1)) }; + + if (kind1 == A_LOWER) { + if (kind2 == A_LOWER) { + if (k2 <= k1) { + mk_clause(~l1, l2, 3, coeffs); } else { - // x <= k1, x <= k2 - if (k1 < k2) mk_clause(~l1, l2, 3, coeffs); - else mk_clause(~l2, l1, 3, coeffs); + mk_clause(l1, ~l2, 3, coeffs); + } + } + else if (k1 <= k2) { + // k1 <= k2, k1 <= x or x <= k2 + mk_clause(l1, l2, 3, coeffs); + } + else { + // k1 > hi_inf, k1 <= x => ~(x <= hi_inf) + mk_clause(~l1, ~l2, 3, coeffs); + if (v_is_int && k1 == k2 + numeral(1)) { + // k1 <= x or x <= k1-1 + mk_clause(l1, l2, 3, coeffs); } } } + else if (kind2 == A_LOWER) { + if (k1 >= k2) { + // k1 >= lo_inf, k1 >= x or lo_inf <= x + mk_clause(l1, l2, 3, coeffs); + } + else { + // k1 < k2, k2 <= x => ~(x <= k1) + mk_clause(~l1, ~l2, 3, coeffs); + if (v_is_int && k1 == k2 - numeral(1)) { + // x <= k1 or k1+l <= x + mk_clause(l1, l2, 3, coeffs); + } + + } + } + else { + // kind1 == A_UPPER, kind2 == A_UPPER + if (k1 >= k2) { + // k1 >= k2, x <= k2 => x <= k1 + mk_clause(l1, ~l2, 3, coeffs); + } + else { + // k1 <= hi_sup , x <= k1 => x <= hi_sup + mk_clause(~l1, l2, 3, coeffs); + } + } } + template + void theory_arith::flush_bound_axioms() { + while 
(!m_new_atoms.empty()) { + ptr_vector atoms; + atoms.push_back(m_new_atoms.back()); + m_new_atoms.pop_back(); + theory_var v = atoms.back()->get_var(); + for (unsigned i = 0; i < m_new_atoms.size(); ++i) { + if (m_new_atoms[i]->get_var() == v) { + atoms.push_back(m_new_atoms[i]); + m_new_atoms[i] = m_new_atoms.back(); + m_new_atoms.pop_back(); + --i; + } + } + ptr_vector occs(m_var_occs[v]); + + std::sort(atoms.begin(), atoms.end(), compare_atoms()); + std::sort(occs.begin(), occs.end(), compare_atoms()); + + typename atoms::iterator begin1 = occs.begin(); + typename atoms::iterator begin2 = occs.begin(); + typename atoms::iterator end = occs.end(); + begin1 = first(A_LOWER, begin1, end); + begin2 = first(A_UPPER, begin2, end); + + typename atoms::iterator lo_inf = begin1, lo_sup = begin1; + typename atoms::iterator hi_inf = begin2, hi_sup = begin2; + typename atoms::iterator lo_inf1 = begin1, lo_sup1 = begin1; + typename atoms::iterator hi_inf1 = begin2, hi_sup1 = begin2; + bool flo_inf, fhi_inf, flo_sup, fhi_sup; + ptr_addr_hashtable visited; + for (unsigned i = 0; i < atoms.size(); ++i) { + atom* a1 = atoms[i]; + lo_inf1 = next_inf(a1, A_LOWER, lo_inf, end, flo_inf); + hi_inf1 = next_inf(a1, A_UPPER, hi_inf, end, fhi_inf); + lo_sup1 = next_sup(a1, A_LOWER, lo_sup, end, flo_sup); + hi_sup1 = next_sup(a1, A_UPPER, hi_sup, end, fhi_sup); + if (lo_inf1 != end) lo_inf = lo_inf1; + if (lo_sup1 != end) lo_sup = lo_sup1; + if (hi_inf1 != end) hi_inf = hi_inf1; + if (hi_sup1 != end) hi_sup = hi_sup1; + if (!flo_inf) lo_inf = end; + if (!fhi_inf) hi_inf = end; + if (!flo_sup) lo_sup = end; + if (!fhi_sup) hi_sup = end; + visited.insert(a1); + if (lo_inf1 != end && lo_inf != end && !visited.contains(*lo_inf)) mk_bound_axiom(a1, *lo_inf); + if (lo_sup1 != end && lo_sup != end && !visited.contains(*lo_sup)) mk_bound_axiom(a1, *lo_sup); + if (hi_inf1 != end && hi_inf != end && !visited.contains(*hi_inf)) mk_bound_axiom(a1, *hi_inf); + if (hi_sup1 != end && hi_sup != end && !visited.contains(*hi_sup)) mk_bound_axiom(a1, *hi_sup); + } + } + } + + template + typename theory_arith::atoms::iterator + theory_arith::first( + atom_kind kind, + typename atoms::iterator it, + typename atoms::iterator end) { + for (; it != end; ++it) { + atom* a = *it; + if (a->get_atom_kind() == kind) return it; + } + return end; + } + + template + typename theory_arith::atoms::iterator + theory_arith::next_inf( + atom* a1, + atom_kind kind, + typename atoms::iterator it, + typename atoms::iterator end, + bool& found_compatible) { + numeral const & k1(a1->get_k()); + typename atoms::iterator result = end; + found_compatible = false; + for (; it != end; ++it) { + atom * a2 = *it; + if (a1 == a2) continue; + if (a2->get_atom_kind() != kind) continue; + numeral const & k2(a2->get_k()); + found_compatible = true; + if (k2 <= k1) { + result = it; + } + else { + break; + } + } + return result; + } + + template + typename theory_arith::atoms::iterator + theory_arith::next_sup( + atom* a1, + atom_kind kind, + typename atoms::iterator it, + typename atoms::iterator end, + bool& found_compatible) { + numeral const & k1(a1->get_k()); + found_compatible = false; + for (; it != end; ++it) { + atom * a2 = *it; + if (a1 == a2) continue; + if (a2->get_atom_kind() != kind) continue; + numeral const & k2(a2->get_k()); + found_compatible = true; + if (k1 < k2) { + return it; + } + } + return end; + } + + template bool theory_arith::internalize_atom(app * n, bool gate_ctx) { TRACE("arith_internalize", tout << "internalising atom:\n" << mk_pp(n, 
this->get_manager()) << "\n";); @@ -879,7 +1080,7 @@ namespace smt { bool_var bv = ctx.mk_bool_var(n); ctx.set_var_theory(bv, get_id()); rational _k; - m_util.is_numeral(rhs, _k); + VERIFY(m_util.is_numeral(rhs, _k)); numeral k(_k); atom * a = alloc(atom, bv, v, k, kind); mk_bound_axioms(a); @@ -927,6 +1128,7 @@ namespace smt { template void theory_arith::assign_eh(bool_var v, bool is_true) { + TRACE("arith", tout << "v" << v << " " << is_true << "\n";); atom * a = get_bv2a(v); if (!a) return; SASSERT(get_context().get_assignment(a->get_bool_var()) != l_undef); @@ -1142,6 +1344,7 @@ namespace smt { CASSERT("arith", wf_columns()); CASSERT("arith", valid_row_assignment()); + flush_bound_axioms(); propagate_linear_monomials(); while (m_asserted_qhead < m_asserted_bounds.size()) { bound * b = m_asserted_bounds[m_asserted_qhead]; @@ -2421,6 +2624,8 @@ namespace smt { if (val == l_undef) continue; // TODO: check if the following line is a bottleneck + TRACE("arith", tout << "v" << a->get_bool_var() << " " << (val == l_true) << "\n";); + a->assign_eh(val == l_true, get_epsilon(a->get_var())); if (val != l_undef && a->get_bound_kind() == b->get_bound_kind()) { SASSERT((ctx.get_assignment(bv) == l_true) == a->is_true()); @@ -2916,6 +3121,7 @@ namespace smt { SASSERT(m_to_patch.empty()); m_to_check.reset(); m_in_to_check.reset(); + m_new_atoms.reset(); CASSERT("arith", wf_rows()); CASSERT("arith", wf_columns()); CASSERT("arith", valid_row_assignment()); diff --git a/src/util/debug.h b/src/util/debug.h index 9e519982f..a36743f73 100644 --- a/src/util/debug.h +++ b/src/util/debug.h @@ -73,7 +73,7 @@ bool is_debug_enabled(const char * tag); UNREACHABLE(); \ } #else -#define VERIFY(_x_) (_x_) +#define VERIFY(_x_) (void)(_x_) #endif #define MAKE_NAME2(LINE) zofty_ ## LINE
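
Note on the theory_arith change above: the rewritten mk_bound_axioms no longer emits a clause between a newly internalized bound atom and every existing atom on the same variable. It relates the new atom only to its nearest neighbours (lo_inf/lo_sup among the lower bounds, hi_inf/hi_sup among the upper bounds), and atoms created outside of search are queued in m_new_atoms and handled later by flush_bound_axioms. The following is a minimal standalone sketch of that neighbour search, using hypothetical simplified types (plain int constants instead of numerals, and none of the integer-specific k/k+1 lemmas); it illustrates the idea only and is not Z3's implementation:

    #include <iostream>
    #include <vector>

    // Hypothetical stand-ins for Z3's bound atoms, reduced to integer constants.
    enum Kind { LOWER, UPPER };          // LOWER: x >= k, UPPER: x <= k
    struct Bound { Kind kind; int k; };

    // For a new bound with constant k1, locate the nearest existing bounds on the
    // same variable: the largest lower/upper bound below k1 and the smallest
    // lower/upper bound at or above k1.  An index of -1 means "no such neighbour".
    static void nearest_neighbours(const std::vector<Bound>& occs, int k1,
                                   int& lo_inf, int& lo_sup,
                                   int& hi_inf, int& hi_sup) {
        lo_inf = lo_sup = hi_inf = hi_sup = -1;
        for (int i = 0; i < static_cast<int>(occs.size()); ++i) {
            const Bound& b = occs[i];
            int& inf = (b.kind == LOWER) ? lo_inf : hi_inf;
            int& sup = (b.kind == LOWER) ? lo_sup : hi_sup;
            if (b.k < k1) {
                if (inf == -1 || b.k > occs[inf].k) inf = i;   // largest below k1
            } else {
                if (sup == -1 || b.k < occs[sup].k) sup = i;   // smallest at/above k1
            }
        }
    }

    int main() {
        // Existing atoms for one variable: x >= 0, x >= 5, x <= 3, x <= 10.
        std::vector<Bound> occs = { {LOWER, 0}, {LOWER, 5}, {UPPER, 3}, {UPPER, 10} };
        int lo_inf, lo_sup, hi_inf, hi_sup;
        nearest_neighbours(occs, 4, lo_inf, lo_sup, hi_inf, hi_sup);
        // Only these (at most four) pairs would get a bound axiom for a new bound
        // with constant 4; other pairs are left to chaining or the theory solver.
        std::cout << "lo_inf=" << lo_inf << " lo_sup=" << lo_sup
                  << " hi_inf=" << hi_inf << " hi_sup=" << hi_sup << '\n';
        return 0;
    }

Relating each new atom to at most four neighbours keeps the number of eagerly generated bound axioms roughly linear in the number of atoms; the remaining pairwise relationships either follow by chaining the neighbour clauses or are detected by the arithmetic solver itself.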