mirror of https://github.com/Z3Prover/z3 synced 2025-04-29 20:05:51 +00:00

replace lean to lp

Signed-off-by: Lev Nachmanson <levnach@microsoft.com>
Lev Nachmanson 2017-07-10 11:06:37 -07:00 committed by Lev Nachmanson
parent db0a3f4358
commit d41c65a4f9
72 changed files with 1334 additions and 1213 deletions


@@ -97,12 +97,12 @@ void sparse_matrix<T, X>::set_with_no_adjusting(unsigned row, unsigned col, T va
template <typename T, typename X>
void sparse_matrix<T, X>::set(unsigned row, unsigned col, T val) { // should not be used in efficient code
SASSERT(row < dimension() && col < dimension());
lp_assert(row < dimension() && col < dimension());
// m_dense.set_elem(row, col, val);
row = adjust_row(row);
col = adjust_column(col);
set_with_no_adjusting(row, col, val);
// SASSERT(*this == m_dense);
// lp_assert(*this == m_dense);
}
template <typename T, typename X>
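
The change in this hunk, repeated throughout the file, swaps the SASSERT macro for lp_assert. The macro's definition is not part of this diff; as a rough sketch only (the guard and the fallback behavior are assumptions, not the actual lp code), a debug assertion macro of this kind usually amounts to:

// Illustrative sketch, not part of the commit: a typical debug-only assertion macro.
#include <cassert>

#ifdef Z3DEBUG                        // assumed guard; the real lp_assert may differ
#define lp_assert(X) assert(X)        // check the condition in debug builds
#else
#define lp_assert(X) ((void)0)        // compile to nothing in release builds
#endif
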
@@ -276,8 +276,8 @@ vector<T> sparse_matrix<T, X>::get_full_row(unsigned i) const {
// Returns false if the resulting row is all zeroes, and true otherwise
template <typename T, typename X>
bool sparse_matrix<T, X>::pivot_row_to_row(unsigned i, const T& alpha, unsigned i0, lp_settings & settings ) {
SASSERT(i < dimension() && i0 < dimension());
SASSERT(i != i0);
lp_assert(i < dimension() && i0 < dimension());
lp_assert(i != i0);
unsigned pivot_col = adjust_column(i);
i = adjust_row(i);
i0 = adjust_row(i0);
@@ -311,7 +311,7 @@ bool sparse_matrix<T, X>::pivot_row_to_row(unsigned i, const T& alpha, unsigned
}
// clean the work vector
// clp the work vector
for (unsigned k = 0; k < prev_size_i0; k++) {
m_work_pivot_vector[i0_row_vals[k].m_index] = -1;
}
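
pivot_row_to_row is the elimination step of the update: it adds alpha times row i to row i0, keeps the result sparse, and, per the comment above it, returns false when the resulting row is all zeroes; m_work_pivot_vector is scratch storage and is reset to -1 afterwards so the next call finds it clean. A dense illustration of the same contract (a simplified sketch, not the class's sparse bookkeeping):

// Illustrative sketch, not part of the commit: row_i0 += alpha * row_i on a dense matrix.
#include <cmath>
#include <cstddef>
#include <vector>

bool pivot_row_to_row_dense(std::vector<std::vector<double>>& A,
                            unsigned i, double alpha, unsigned i0,
                            double drop_tolerance) {
    bool has_nonzero = false;
    for (std::size_t j = 0; j < A[i0].size(); ++j) {
        A[i0][j] += alpha * A[i][j];
        if (std::fabs(A[i0][j]) < drop_tolerance)
            A[i0][j] = 0.0;           // same role as abs_val_is_smaller_than_drop_tolerance
        else
            has_nonzero = true;
    }
    return has_nonzero;               // false means the row became all zeroes
}
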
@@ -334,7 +334,7 @@ bool sparse_matrix<T, X>::pivot_row_to_row(unsigned i, const T& alpha, unsigned
// set the max val as well
// returns false if the resulting row is all zeroes, and true otherwise
template <typename T, typename X>
bool sparse_matrix<T, X>::set_row_from_work_vector_and_clean_work_vector_not_adjusted(unsigned i0, indexed_vector<T> & work_vec,
bool sparse_matrix<T, X>::set_row_from_work_vector_and_clp_work_vector_not_adjusted(unsigned i0, indexed_vector<T> & work_vec,
lp_settings & settings) {
remove_zero_elements_and_set_data_on_existing_elements_not_adjusted(i0, work_vec, settings);
// all non-zero elements in m_work_pivot_vector are new
@@ -342,7 +342,7 @@ bool sparse_matrix<T, X>::set_row_from_work_vector_and_clean_work_vector_not_adj
if (numeric_traits<T>::is_zero(work_vec[j])) {
continue;
}
SASSERT(!settings.abs_val_is_smaller_than_drop_tolerance(work_vec[j]));
lp_assert(!settings.abs_val_is_smaller_than_drop_tolerance(work_vec[j]));
add_new_element(i0, adjust_column(j), work_vec[j]);
work_vec[j] = numeric_traits<T>::zero();
}
@@ -387,7 +387,7 @@ void sparse_matrix<T, X>::remove_zero_elements_and_set_data_on_existing_elements
T val = work_vec[rj];
if (settings.abs_val_is_smaller_than_drop_tolerance(val)) {
remove_element(row_vals, row_el_iv);
SASSERT(numeric_traits<T>::is_zero(val));
lp_assert(numeric_traits<T>::is_zero(val));
} else {
m_columns[j].m_values[row_el_iv.m_other].set_value(row_el_iv.m_value = val);
work_vec[rj] = numeric_traits<T>::zero();
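
remove_zero_elements_and_set_data_on_existing_elements_not_adjusted walks the nonzeros already stored in row i0, drops those whose refreshed value falls under the drop tolerance, and writes the surviving values into both the row copy and the column copy of each cell, zeroing the scratch slots as it goes. A compact sketch of that filter on a plain index/value row (illustrative only; the real code also keeps the row/column cross links in sync):

// Illustrative sketch, not part of the commit.
#include <cmath>
#include <cstddef>
#include <vector>

struct cell { unsigned index; double value; };

void refresh_row_from_work_vector(std::vector<cell>& row,
                                  std::vector<double>& work_vec,
                                  double drop_tolerance) {
    std::size_t k = 0;
    for (const cell& c : row) {
        double v = work_vec[c.index];
        work_vec[c.index] = 0.0;          // leave the scratch vector clean either way
        if (std::fabs(v) >= drop_tolerance)
            row[k++] = {c.index, v};      // keep the entry with its refreshed value
        // values under the tolerance are dropped (not copied back)
    }
    row.resize(k);                        // shrink to the surviving nonzeros
}
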
@@ -408,7 +408,7 @@ void sparse_matrix<T, X>::add_columns_at_the_end(unsigned delta) {
template <typename T, typename X>
void sparse_matrix<T, X>::delete_column(int i) {
SASSERT(i < dimension());
lp_assert(i < dimension());
for (auto cell = m_columns[i].m_head; cell != nullptr;) {
auto next_cell = cell->m_down;
kill_cell(cell);
@@ -418,7 +418,7 @@ void sparse_matrix<T, X>::delete_column(int i) {
template <typename T, typename X>
void sparse_matrix<T, X>::divide_row_by_constant(unsigned i, const T & t, lp_settings & settings) {
SASSERT(!settings.abs_val_is_smaller_than_zero_tolerance(t));
lp_assert(!settings.abs_val_is_smaller_than_zero_tolerance(t));
i = adjust_row(i);
for (auto & iv : m_rows[i]) {
T &v = iv.m_value;
@@ -455,7 +455,7 @@ void sparse_matrix<T, X>::solve_y_U(vector<T> & y) const { // works by rows
// dense_matrix<T> deb(*this);
// T * clone_y = clone_vector<T>(y, dimension());
// deb.apply_from_right(clone_y);
// SASSERT(vectors_are_equal(rs, clone_y, dimension()));
// lp_assert(vectors_are_equal(rs, clone_y, dimension()));
// delete [] clone_y;
// delete [] rs;
#endif
@@ -489,10 +489,10 @@ void sparse_matrix<T, X>::solve_y_U_indexed(indexed_vector<T> & y, const lp_sett
y.m_data[j] = zero_of_type<T>();
}
SASSERT(y.is_OK());
#if 0 && Z3DEBUG
lp_assert(y.is_OK());
#if 0 && LEAN_DEBUG
if (numeric_traits<T>::precise() == false)
SASSERT(vectors_are_equal(ycopy, y.m_data));
lp_assert(vectors_are_equal(ycopy, y.m_data));
#endif
}
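
y here is an indexed_vector: a dense array m_data plus the list m_index of positions expected to be nonzero, and after zeroing the entries that fall under the tolerance the code asserts y.is_OK(). The actual is_OK() is defined elsewhere; presumably it verifies that the two views agree, roughly along these lines (an assumption, not the real implementation):

// Illustrative sketch, not part of the commit: a plausible indexed_vector consistency check.
#include <cstddef>
#include <vector>

bool indexed_vector_is_ok(const std::vector<double>& m_data,
                          const std::vector<unsigned>& m_index) {
    std::vector<bool> listed(m_data.size(), false);
    for (unsigned i : m_index) {
        if (i >= m_data.size() || listed[i])
            return false;                 // out of range or duplicated index entry
        listed[i] = true;
    }
    for (std::size_t i = 0; i < m_data.size(); ++i)
        if (m_data[i] != 0.0 && !listed[i])
            return false;                 // a nonzero entry is missing from the index
    return true;
}
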
@@ -552,8 +552,8 @@ void sparse_matrix<T, X>::add_delta_to_solution(const vector<L>& del, vector<L>
template <typename T, typename X>
template <typename L>
void sparse_matrix<T, X>::add_delta_to_solution(const indexed_vector<L>& del, indexed_vector<L> & y) {
// SASSERT(del.is_OK());
// SASSERT(y.is_OK());
// lp_assert(del.is_OK());
// lp_assert(y.is_OK());
for (auto i : del.m_index) {
y.add_value_at_index(i, del[i]);
}
@@ -561,24 +561,24 @@ void sparse_matrix<T, X>::add_delta_to_solution(const indexed_vector<L>& del, in
template <typename T, typename X>
template <typename L>
void sparse_matrix<T, X>::double_solve_U_y(indexed_vector<L>& y, const lp_settings & settings){
SASSERT(y.is_OK());
lp_assert(y.is_OK());
indexed_vector<L> y_orig(y); // copy y aside
vector<unsigned> active_rows;
solve_U_y_indexed_only(y, settings, active_rows);
SASSERT(y.is_OK());
lp_assert(y.is_OK());
find_error_in_solution_U_y_indexed(y_orig, y, active_rows);
// y_orig contains the error now
if (y_orig.m_index.size() * ratio_of_index_size_to_all_size<T>() < 32 * dimension()) {
active_rows.clear();
solve_U_y_indexed_only(y_orig, settings, active_rows);
add_delta_to_solution(y_orig, y);
y.clean_up();
y.clp_up();
} else { // the dense version
solve_U_y(y_orig.m_data);
add_delta_to_solution(y_orig.m_data, y.m_data);
y.restore_index_and_clean_from_data();
y.restore_index_and_clp_from_data();
}
SASSERT(y.is_OK());
lp_assert(y.is_OK());
}
template <typename T, typename X>
template <typename L>
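
double_solve_U_y is one round of iterative refinement: solve with U once, compute the error of that solution into y_orig (find_error_in_solution_U_y_indexed), solve again with the error as the right-hand side, and add the correction back, picking the indexed or the dense path depending on how many entries of the error survive. A dense, self-contained sketch of the same idea (the helpers here are stand-ins, not the class's API):

// Illustrative sketch, not part of the commit: refinement on a dense upper-triangular system.
#include <cstddef>
#include <vector>

using matrix = std::vector<std::vector<double>>;

// Backsolve U*x = b for upper-triangular U (assumed nonsingular).
static std::vector<double> solve_upper(const matrix& U, std::vector<double> b) {
    for (std::size_t i = U.size(); i-- > 0; ) {
        for (std::size_t j = i + 1; j < U.size(); ++j)
            b[i] -= U[i][j] * b[j];
        b[i] /= U[i][i];
    }
    return b;
}

// Residual b - U*x, playing the role of find_error_in_solution_U_y_indexed.
static std::vector<double> residual(const matrix& U, const std::vector<double>& x,
                                    const std::vector<double>& b) {
    std::vector<double> r(b);
    for (std::size_t i = 0; i < U.size(); ++i)
        for (std::size_t j = 0; j < U.size(); ++j)
            r[i] -= U[i][j] * x[j];
    return r;
}

// Solve, measure the error, solve for a correction, add it back (add_delta_to_solution).
std::vector<double> double_solve(const matrix& U, const std::vector<double>& b) {
    std::vector<double> x = solve_upper(U, b);
    std::vector<double> delta = solve_upper(U, residual(U, x, b));
    for (std::size_t i = 0; i < x.size(); ++i)
        x[i] += delta[i];
    return x;
}
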
@@ -614,12 +614,12 @@ void sparse_matrix<T, X>::solve_U_y(vector<L> & y) { // it is a column wise vers
// dense_matrix<T> deb(*this);
// T * clone_y = clone_vector<T>(y, dimension());
// deb.apply_from_left(clone_y);
// SASSERT(vectors_are_equal(rs, clone_y, dimension()));
// lp_assert(vectors_are_equal(rs, clone_y, dimension()));
#endif
}
template <typename T, typename X>
void sparse_matrix<T, X>::process_index_recursively_for_y_U(unsigned j, vector<unsigned> & sorted_active_rows) {
SASSERT(m_processed[j] == false);
lp_assert(m_processed[j] == false);
m_processed[j]=true;
auto & row = m_rows[adjust_row(j)];
for (auto & c : row) {
@@ -634,7 +634,7 @@ void sparse_matrix<T, X>::process_index_recursively_for_y_U(unsigned j, vector<u
template <typename T, typename X>
void sparse_matrix<T, X>::process_column_recursively(unsigned j, vector<unsigned> & sorted_active_rows) {
SASSERT(m_processed[j] == false);
lp_assert(m_processed[j] == false);
auto & mc = m_columns[adjust_column(j)].m_values;
for (auto & iv : mc) {
unsigned i = adjust_row_inverse(iv.m_index);
@@ -699,12 +699,12 @@ void sparse_matrix<T, X>::solve_U_y_indexed_only(indexed_vector<L> & y, const lp
y[j] = zero_of_type<L>();
}
SASSERT(y.is_OK());
#ifdef Z3DEBUG
lp_assert(y.is_OK());
#ifdef LEAN_DEBUG
// dense_matrix<T,X> deb(this);
// vector<T> clone_y(y.m_data);
// deb.apply_from_left(clone_y);
// SASSERT(vectors_are_equal(rs, clone_y));
// lp_assert(vectors_are_equal(rs, clone_y));
#endif
}
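
solve_U_y_indexed_only exploits sparsity of the right-hand side: starting from the nonzero positions of y, the recursive passes above (process_index_recursively_for_y_U, process_column_recursively) mark the rows that can become nonzero and emit them into sorted_active_rows in a dependency-compatible order, so the triangular solve only visits those rows. A hedged sketch of that reachability pass on a columns-as-row-lists representation (simplified; the real code also goes through the row/column permutations):

// Illustrative sketch, not part of the commit: collect the rows reachable from one
// nonzero position of the right-hand side (post-order DFS over the column structure).
#include <vector>

void collect_active_rows(unsigned j,
                         const std::vector<std::vector<unsigned>>& rows_of_column,
                         std::vector<bool>& processed,
                         std::vector<unsigned>& sorted_active_rows) {
    processed[j] = true;
    for (unsigned i : rows_of_column[j])
        if (i != j && !processed[i])          // skip the diagonal, recurse into the rows it touches
            collect_active_rows(i, rows_of_column, processed, sorted_active_rows);
    sorted_active_rows.push_back(j);          // post-order: the solve walks this list in the right direction
}
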
@@ -817,7 +817,7 @@ void sparse_matrix<T, X>::add_new_elements_of_w_and_clear_w(unsigned column_to_r
unsigned ai = adjust_row(i);
add_new_element(ai, column_to_replace, w_at_i);
auto & row_chunk = m_rows[ai];
SASSERT(row_chunk.size() > 0);
lp_assert(row_chunk.size() > 0);
if (abs(w_at_i) > abs(row_chunk[0].m_value))
put_max_index_to_0(row_chunk, static_cast<unsigned>(row_chunk.size()) - 1);
}
@@ -848,7 +848,7 @@ unsigned sparse_matrix<T, X>::pivot_score(unsigned i, unsigned j) {
template <typename T, typename X>
void sparse_matrix<T, X>::enqueue_domain_into_pivot_queue() {
SASSERT(m_pivot_queue.size() == 0);
lp_assert(m_pivot_queue.size() == 0);
for (unsigned i = 0; i < dimension(); i++) {
auto & rh = m_rows[i];
unsigned rnz = static_cast<unsigned>(rh.size());
@@ -934,7 +934,7 @@ void sparse_matrix<T, X>::update_active_pivots(unsigned row) {
for (const auto & iv : m_rows[arow]) {
col_header & ch = m_columns[iv.m_index];
int cols = static_cast<int>(ch.m_values.size()) - ch.m_shortened_markovitz - 1;
SASSERT(cols >= 0);
lp_assert(cols >= 0);
for (const auto &ivc : ch.m_values) {
unsigned i = ivc.m_index;
if (adjust_row_inverse(i) <= row) continue; // the i is not an active row
@@ -960,7 +960,7 @@ bool sparse_matrix<T, X>::shorten_active_matrix(unsigned row, eta_matrix<T, X> *
for (auto & iv : row_values) {
const col_header& ch = m_columns[iv.m_index];
int cnz = static_cast<int>(ch.m_values.size()) - ch.m_shortened_markovitz - 1;
SASSERT(cnz >= 0);
lp_assert(cnz >= 0);
m_pivot_queue.enqueue(row, iv.m_index, rnz * cnz);
}
}
@@ -976,7 +976,7 @@ unsigned sparse_matrix<T, X>::pivot_score_without_shortened_counters(unsigned i,
if (adjust_row_inverse(iv.m_index) < k)
cnz--;
}
SASSERT(cnz > 0);
lp_assert(cnz > 0);
return m_rows[i].m_values.size() * (cnz - 1);
}
#ifdef Z3DEBUG
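
The pivot machinery in this region follows the Markowitz heuristic: prefer a pivot (i, j) that minimizes a product of the row and column nonzero counts of the active submatrix, since that bounds the fill-in one elimination step can create; the "shortened" counters and the cut-off k are this class's way of restricting the counts to the not-yet-eliminated part. The classical score, as a plain sketch:

// Illustrative sketch, not part of the commit: the classical Markowitz pivot cost.
// The nonzero counts are assumed to already exclude rows and columns eliminated earlier.
#include <vector>

unsigned markowitz_score(const std::vector<unsigned>& row_nnz,
                         const std::vector<unsigned>& col_nnz,
                         unsigned i, unsigned j) {
    return (row_nnz[i] - 1) * (col_nnz[j] - 1);   // smaller is better: less potential fill-in
}

The c_partial_pivoting factor seen in can_improve_score_for_row then restricts candidates to entries whose magnitude is within that factor of the row maximum, trading some sparsity for numerical stability.
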
@@ -986,15 +986,15 @@ bool sparse_matrix<T, X>::can_improve_score_for_row(unsigned row, unsigned score
auto & row_vals = m_rows[arow].m_values;
auto & begin_iv = row_vals[0];
T row_max = abs(begin_iv.m_value);
SASSERT(adjust_column_inverse(begin_iv.m_index) >= k);
lp_assert(adjust_column_inverse(begin_iv.m_index) >= k);
if (pivot_score_without_shortened_counters(arow, begin_iv.m_index, k) < score) {
print_active_matrix(k);
return true;
}
for (unsigned jj = 1; jj < row_vals.size(); jj++) {
auto & iv = row_vals[jj];
SASSERT(adjust_column_inverse(iv.m_index) >= k);
SASSERT(abs(iv.m_value) <= row_max);
lp_assert(adjust_column_inverse(iv.m_index) >= k);
lp_assert(abs(iv.m_value) <= row_max);
if (c_partial_pivoting * abs(iv.m_value) < row_max) continue;
if (pivot_score_without_shortened_counters(arow, iv.m_index, k) < score) {
print_active_matrix(k);
@@ -1008,7 +1008,7 @@ template <typename T, typename X>
bool sparse_matrix<T, X>::really_best_pivot(unsigned i, unsigned j, T const & c_partial_pivoting, unsigned k) {
unsigned queue_pivot_score = pivot_score_without_shortened_counters(i, j, k);
for (unsigned ii = k; ii < dimension(); ii++) {
SASSERT(!can_improve_score_for_row(ii, queue_pivot_score, c_partial_pivoting, k));
lp_assert(!can_improve_score_for_row(ii, queue_pivot_score, c_partial_pivoting, k));
}
return true;
}
@@ -1041,7 +1041,7 @@ template <typename T, typename X>
bool sparse_matrix<T, X>::pivot_queue_is_correct_for_row(unsigned i, unsigned k) {
unsigned arow = adjust_row(i);
for (auto & iv : m_rows[arow].m_values) {
SASSERT(pivot_score_without_shortened_counters(arow, iv.m_index, k + 1) ==
lp_assert(pivot_score_without_shortened_counters(arow, iv.m_index, k + 1) ==
m_pivot_queue.get_priority(arow, iv.m_index));
}
return true;
@@ -1050,8 +1050,8 @@ bool sparse_matrix<T, X>::pivot_queue_is_correct_for_row(unsigned i, unsigned k)
template <typename T, typename X>
bool sparse_matrix<T, X>::pivot_queue_is_correct_after_pivoting(int k) {
for (unsigned i = k + 1; i < dimension(); i++ )
SASSERT(pivot_queue_is_correct_for_row(i, k));
SASSERT(m_pivot_queue.is_correct());
lp_assert(pivot_queue_is_correct_for_row(i, k));
lp_assert(m_pivot_queue.is_correct());
return true;
}
#endif
@@ -1070,7 +1070,7 @@ bool sparse_matrix<T, X>::get_pivot_for_column(unsigned &i, unsigned &j, int c_p
#ifdef Z3DEBUG
// if (!really_best_pivot(i, j, c_partial_pivoting, k)) {
// print_active_matrix(k);
// SASSERT(false);
// lp_assert(false);
// }
#endif
recover_pivot_queue(pivots_candidates_that_are_too_small);
@@ -1103,7 +1103,7 @@ bool sparse_matrix<T, X>::shorten_columns_by_pivot_row(unsigned i, unsigned pivo
for (indexed_value<T> & iv : row_chunk) {
unsigned j = iv.m_index;
if (j == pivot_column) {
SASSERT(!col_is_active(j));
lp_assert(!col_is_active(j));
continue;
}
m_columns[j].shorten_markovich_by_one();
@@ -1166,11 +1166,11 @@ template <typename T, typename X>
bool sparse_matrix<T, X>::is_upper_triangular_and_maximums_are_set_correctly_in_rows(lp_settings & settings) const {
for (unsigned i = 0; i < dimension(); i++) {
vector<indexed_value<T>> const & row_chunk = get_row_values(i);
SASSERT(row_chunk.size());
lp_assert(row_chunk.size());
T const & max = abs(row_chunk[0].m_value);
unsigned ai = adjust_row_inverse(i);
for (auto & iv : row_chunk) {
SASSERT(abs(iv.m_value) <= max);
lp_assert(abs(iv.m_value) <= max);
unsigned aj = adjust_column_inverse(iv.m_index);
if (!(ai <= aj || numeric_traits<T>::is_zero(iv.m_value)))
return false;
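
This checker states two invariants of the factor: under the current row and column permutations every stored nonzero lies on or above the diagonal, and the first element of each row chunk carries the largest absolute value in that row (which is what put_max_index_to_0 maintains earlier in the file). The triangularity part, as a dense illustration that ignores the permutations:

// Illustrative sketch, not part of the commit: upper triangularity on a dense matrix.
#include <cstddef>
#include <vector>

bool is_upper_triangular(const std::vector<std::vector<double>>& A) {
    for (std::size_t i = 0; i < A.size(); ++i)
        for (std::size_t j = 0; j < i; ++j)       // strictly below the diagonal
            if (A[i][j] != 0.0)
                return false;
    return true;
}
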
@@ -1208,18 +1208,18 @@ void sparse_matrix<T, X>::check_column_vs_rows(unsigned col) {
indexed_value<T> & row_iv = column_iv_other(column_iv);
if (row_iv.m_index != col) {
// std::cout << "m_other in row does not belong to column " << col << ", but to column " << row_iv.m_index << std::endl;
SASSERT(false);
lp_assert(false);
}
if (& row_iv_other(row_iv) != &column_iv) {
// std::cout << "row and col do not point to each other" << std::endl;
SASSERT(false);
lp_assert(false);
}
if (row_iv.m_value != column_iv.m_value) {
// std::cout << "the data from col " << col << " for row " << column_iv.m_index << " is different in the column " << std::endl;
// std::cout << "in the col it is " << column_iv.m_value << ", but in the row it is " << row_iv.m_value << std::endl;
SASSERT(false);
lp_assert(false);
}
}
}
@@ -1232,18 +1232,18 @@ void sparse_matrix<T, X>::check_row_vs_columns(unsigned row) {
if (column_iv.m_index != row) {
// std::cout << "col_iv does not point to correct row " << row << " but to " << column_iv.m_index << std::endl;
SASSERT(false);
lp_assert(false);
}
if (& row_iv != & column_iv_other(column_iv)) {
// std::cout << "row and col do not point to each other" << std::endl;
SASSERT(false);
lp_assert(false);
}
if (row_iv.m_value != column_iv.m_value) {
// std::cout << "the data from col " << column_iv.m_index << " for row " << row << " is different in the column " << std::endl;
// std::cout << "in the col it is " << column_iv.m_value << ", but in the row it is " << row_iv.m_value << std::endl;
SASSERT(false);
lp_assert(false);
}
}
}
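
The two checkers at the end document the storage layout: every nonzero is held twice, once in its row chunk and once in its column chunk, and each copy records where its twin lives (the m_other link used by row_iv_other / column_iv_other); the asserts verify that the twins point back at each other, agree on the row and column index, and carry the same value. A minimal sketch of that cross-linked layout and its check (field names here are illustrative, not the class's actual members):

// Illustrative sketch, not part of the commit: a doubly-indexed sparse layout and its consistency check.
#include <cstddef>
#include <vector>

struct row_cell { unsigned col; std::size_t pos_in_col; double value; };
struct col_cell { unsigned row; std::size_t pos_in_row; double value; };

bool rows_match_columns(const std::vector<std::vector<row_cell>>& rows,
                        const std::vector<std::vector<col_cell>>& cols) {
    for (unsigned i = 0; i < rows.size(); ++i) {
        for (std::size_t k = 0; k < rows[i].size(); ++k) {
            const row_cell& rc = rows[i][k];
            const col_cell& cc = cols[rc.col][rc.pos_in_col];
            if (cc.row != i || cc.pos_in_row != k || cc.value != rc.value)
                return false;             // the twins disagree: a broken cross link
        }
    }
    return true;
}
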