/*
  Copyright (c) 2017 Microsoft Corporation
  Author: Lev Nachmanson
*/
#include "util/vector.h"
#include "util/lp/sparse_matrix.h"
#include <set>
#include <queue>
namespace lean {
template <typename T, typename X>
void sparse_matrix<T, X>::copy_column_from_static_matrix(unsigned col, static_matrix<T, X> const &A, unsigned col_index_in_the_new_matrix) {
    vector<column_cell> const & A_col_vector = A.m_columns[col];
    unsigned size = static_cast<unsigned>(A_col_vector.size());
    vector<indexed_value<T>> & new_column_vector = m_columns[col_index_in_the_new_matrix].m_values;
    for (unsigned l = 0; l < size; l++) {
        column_cell const & col_cell = A_col_vector[l];
        unsigned col_offset = static_cast<unsigned>(new_column_vector.size());
        vector<indexed_value<T>> & row_vector = m_rows[col_cell.m_i];
        unsigned row_offset = static_cast<unsigned>(row_vector.size());
        const T & val = A.get_val(col_cell);
        new_column_vector.push_back(indexed_value<T>(val, col_cell.m_i, row_offset));
        row_vector.push_back(indexed_value<T>(val, col_index_in_the_new_matrix, col_offset));
        m_n_of_active_elems++;
    }
}

template <typename T, typename X>
void sparse_matrix<T, X>::copy_B(static_matrix<T, X> const &A, vector<unsigned> & basis) {
    unsigned m = A.row_count(); // this should be the size of basis
    for (unsigned j = m; j-- > 0;) {
        copy_column_from_static_matrix(basis[j], A, j);
    }
}

// constructor that copies columns of the basis from A
template <typename T, typename X>
sparse_matrix<T, X>::sparse_matrix(static_matrix<T, X> const &A, vector<unsigned> & basis) :
    m_n_of_active_elems(0),
    m_pivot_queue(A.row_count()),
    m_row_permutation(A.row_count()),
    m_column_permutation(A.row_count()),
    m_work_pivot_vector(A.row_count(), -1),
    m_processed(A.row_count()) {
    init_row_headers();
    init_column_headers();
    copy_B(A, basis);
}

template <typename T, typename X>
void sparse_matrix<T, X>::set_with_no_adjusting_for_row(unsigned row, unsigned col, T val) { // should not be used in efficient code
    vector<indexed_value<T>> & row_vec = m_rows[row];
    for (auto & iv : row_vec) {
        if (iv.m_index == col) {
            iv.set_value(val);
            return;
        }
    }
    // have not found the column between the indices
    row_vec.push_back(indexed_value<T>(val, col)); // what about m_other ???
}

template <typename T, typename X>
void sparse_matrix<T, X>::set_with_no_adjusting_for_col(unsigned row, unsigned col, T val) { // should not be used in efficient code
    vector<indexed_value<T>> & col_vec = m_columns[col].m_values;
    for (auto & iv : col_vec) {
        if (iv.m_index == row) {
            iv.set_value(val);
            return;
        }
    }
    // have not found the column between the indices
    col_vec.push_back(indexed_value<T>(val, row)); // what about m_other ???
}
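// Note on the storage scheme: every nonzero A[i][j] is stored twice, as an
// indexed_value<T> in m_rows[i] (with m_index == j) and in
// m_columns[j].m_values (with m_index == i). The m_other field of each copy
// holds the offset of its twin inside the other container, so remove_element()
// and add_new_element() can keep both sides in sync in O(1). E.g., after
// add_new_element(2, 5, v) the row copy sits at m_rows[2][r] with m_other == c,
// and the column copy at m_columns[5].m_values[c] with m_other == r, where r
// and c are the sizes of the two containers before the insertion.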
template <typename T, typename X>
void sparse_matrix<T, X>::set_with_no_adjusting(unsigned row, unsigned col, T val) { // should not be used in efficient code
    set_with_no_adjusting_for_row(row, col, val);
    set_with_no_adjusting_for_col(row, col, val);
}

template <typename T, typename X>
void sparse_matrix<T, X>::set(unsigned row, unsigned col, T val) { // should not be used in efficient code
    lean_assert(row < dimension() && col < dimension());
    // m_dense.set_elem(row, col, val);
    row = adjust_row(row);
    col = adjust_column(col);
    set_with_no_adjusting(row, col, val);
    // lean_assert(*this == m_dense);
}

template <typename T, typename X>
T const & sparse_matrix<T, X>::get_not_adjusted(unsigned row, unsigned col) const {
    for (indexed_value<T> const & iv : m_rows[row]) {
        if (iv.m_index == col) {
            return iv.m_value;
        }
    }
    return numeric_traits<T>::zero();
}

template <typename T, typename X>
T const & sparse_matrix<T, X>::get(unsigned row, unsigned col) const { // should not be used in efficient code
    row = adjust_row(row);
    auto & row_chunk = m_rows[row];
    col = adjust_column(col);
    for (indexed_value<T> const & iv : row_chunk) {
        if (iv.m_index == col) {
            return iv.m_value;
        }
    }
    return numeric_traits<T>::zero();
}

// constructor creating a zero matrix of dim*dim
template <typename T, typename X>
sparse_matrix<T, X>::sparse_matrix(unsigned dim) :
    m_pivot_queue(dim), // dim will be the initial size of the queue
    m_row_permutation(dim),
    m_column_permutation(dim),
    m_work_pivot_vector(dim, -1),
    m_processed(dim) {
    init_row_headers();
    init_column_headers();
}

template <typename T, typename X>
void sparse_matrix<T, X>::init_row_headers() {
    for (unsigned l = 0; l < m_row_permutation.size(); l++) {
        m_rows.push_back(vector<indexed_value<T>>());
    }
}

template <typename T, typename X>
void sparse_matrix<T, X>::init_column_headers() { // we always have only a square sparse_matrix
    for (unsigned l = 0; l < m_row_permutation.size(); l++) {
        m_columns.push_back(col_header());
    }
}

template <typename T, typename X>
unsigned sparse_matrix<T, X>::lowest_row_in_column(unsigned j) {
    auto & mc = get_column_values(adjust_column(j));
    unsigned ret = 0;
    for (auto & iv : mc) {
        unsigned row = adjust_row_inverse(iv.m_index);
        if (row > ret) {
            ret = row;
        }
    }
    return ret;
}

template <typename T, typename X>
void sparse_matrix<T, X>::remove_element(vector<indexed_value<T>> & row_vals, unsigned row_offset, vector<indexed_value<T>> & column_vals, unsigned column_offset) {
    if (column_offset != column_vals.size() - 1) {
        auto & column_iv = column_vals[column_offset] = column_vals.back(); // copy from the tail
        column_iv_other(column_iv).m_other = column_offset;
        if (row_offset != row_vals.size() - 1) {
            auto & row_iv = row_vals[row_offset] = row_vals.back(); // copy from the tail
            row_iv_other(row_iv).m_other = row_offset;
        }
    } else if (row_offset != row_vals.size() - 1) {
        auto & row_iv = row_vals[row_offset] = row_vals.back(); // copy from the tail
        row_iv_other(row_iv).m_other = row_offset;
    }
    // do nothing - just decrease the sizes
    column_vals.pop_back();
    row_vals.pop_back();
    m_n_of_active_elems--; // the value is correct only when refactoring
}

template <typename T, typename X>
void sparse_matrix<T, X>::remove_element(vector<indexed_value<T>> & row_chunk, indexed_value<T> & row_el_iv) {
    auto & column_chunk = get_column_values(row_el_iv.m_index);
    indexed_value<T> & col_el_iv = column_chunk[row_el_iv.m_other];
    remove_element(row_chunk, col_el_iv.m_other, column_chunk, row_el_iv.m_other);
}

template <typename T, typename X>
void sparse_matrix<T, X>::put_max_index_to_0(vector<indexed_value<T>> & row_vals, unsigned max_index) {
    if (max_index == 0) return;
    indexed_value<T> * max_iv = & row_vals[max_index];
    indexed_value<T> * start_iv = & row_vals[0];
    // update the "other" columns elements which are bound to the start_iv and max_iv
    m_columns[max_iv->m_index].m_values[max_iv->m_other].m_other = 0;
    m_columns[start_iv->m_index].m_values[start_iv->m_other].m_other = max_index;
    // swap the elements
    indexed_value<T> t = * max_iv;
    * max_iv = * start_iv;
    * start_iv = t;
}
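// The row head invariant: row_vals[0] holds the element of the row with the
// maximal absolute value. put_max_index_to_0 (above) moves a given element to
// the head, fixing the m_other back-pointers first; set_max_in_row (below)
// restores the invariant by scanning the whole row. The partial-pivoting tests
// in elem_is_too_small compare candidate pivots against this head element.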
template <typename T, typename X>
void sparse_matrix<T, X>::set_max_in_row(vector<indexed_value<T>> & row_vals) {
    if (row_vals.size() == 0)
        return;
    T max_val = abs(row_vals[0].m_value);
    unsigned max_index = 0;
    for (unsigned i = 1; i < row_vals.size(); i++) {
        T iabs = abs(row_vals[i].m_value);
        if (iabs > max_val) {
            max_val = iabs;
            max_index = i;
        }
    }
    put_max_index_to_0(row_vals, max_index);
}

template <typename T, typename X>
bool sparse_matrix<T, X>::pivot_with_eta(unsigned i, eta_matrix<T, X> *eta_matrix, lp_settings & settings) {
    const T& pivot = eta_matrix->get_diagonal_element();
    for (auto & it : eta_matrix->m_column_vector.m_data) {
        if (!pivot_row_to_row(i, it.second, it.first, settings)) {
            return false;
        }
    }
    divide_row_by_constant(i, pivot, settings);
    if (!shorten_active_matrix(i, eta_matrix)) {
        return false;
    }
    return true;
}

// fills m_work_pivot_vector with the offsets of the row elements
// and removes the pivot column element from the row
template <typename T, typename X>
void sparse_matrix<T, X>::scan_row_to_work_vector_and_remove_pivot_column(unsigned row, unsigned pivot_column) {
    auto & rvals = m_rows[row];
    unsigned size = rvals.size();
    for (unsigned j = 0; j < size; j++) {
        auto & iv = rvals[j];
        if (iv.m_index != pivot_column) {
            m_work_pivot_vector[iv.m_index] = j;
        } else {
            remove_element(rvals, iv);
            j--;
            size--;
        }
    }
}

#ifdef LEAN_DEBUG
template <typename T, typename X>
vector<T> sparse_matrix<T, X>::get_full_row(unsigned i) const {
    vector<T> r;
    for (unsigned j = 0; j < column_count(); j++)
        r.push_back(get(i, j));
    return r;
}
#endif

// This method pivots row i to row i0 by multiplying row i by
// alpha and adding it to row i0.
// After pivoting, row i0 has its max abs value set correctly at its head.
// Returns false if the resulting row is all zeroes, and true otherwise.
template <typename T, typename X>
bool sparse_matrix<T, X>::pivot_row_to_row(unsigned i, const T& alpha, unsigned i0, lp_settings & settings) {
    lean_assert(i < dimension() && i0 < dimension());
    lean_assert(i != i0);
    unsigned pivot_col = adjust_column(i);
    i = adjust_row(i);
    i0 = adjust_row(i0);
    vector<unsigned> became_zeros;
    // m_work_pivot_vector[j] will hold the offset of the element of column j in row i0
    scan_row_to_work_vector_and_remove_pivot_column(i0, pivot_col);
    auto & i0_row_vals = m_rows[i0];
    // run over the pivot row and update row i0
    unsigned prev_size_i0 = i0_row_vals.size();
    for (const auto & iv : m_rows[i]) {
        unsigned j = iv.m_index;
        if (j == pivot_col) continue;
        T alv = alpha * iv.m_value;
        int j_offs = m_work_pivot_vector[j];
        if (j_offs == -1) { // it is a new element
            if (!settings.abs_val_is_smaller_than_drop_tolerance(alv)) {
                add_new_element(i0, j, alv);
            }
        }
        else {
            auto & row_el_iv = i0_row_vals[j_offs];
            row_el_iv.m_value += alv;
            if (settings.abs_val_is_smaller_than_drop_tolerance(row_el_iv.m_value)) {
                became_zeros.push_back(j_offs);
                ensure_increasing(became_zeros);
            }
            else {
                m_columns[j].m_values[row_el_iv.m_other].set_value(row_el_iv.m_value);
            }
        }
    }
    // clean the work vector
    for (unsigned k = 0; k < prev_size_i0; k++) {
        m_work_pivot_vector[i0_row_vals[k].m_index] = -1;
    }
    for (unsigned k = became_zeros.size(); k-- > 0; ) {
        unsigned j = became_zeros[k];
        remove_element(i0_row_vals, i0_row_vals[j]);
        if (i0_row_vals.empty())
            return false;
    }
    if (numeric_traits<T>::precise() == false)
        set_max_in_row(i0_row_vals);
    return !i0_row_vals.empty();
}
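// Illustration of pivot_row_to_row: with alpha = -2, an entry 3 at column 5 of
// row i, and an entry 4 at column 5 of row i0, the column-5 entry of i0 becomes
// 4 + (-2)*3 = -2. Entries whose result falls under the drop tolerance are
// queued in became_zeros and removed afterwards, so the row and column chunks
// never keep stored zeros.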
// sets the row values from the work vector, and sets the max val as well
// returns false if the resulting row is all zeroes, and true otherwise
template <typename T, typename X>
bool sparse_matrix<T, X>::set_row_from_work_vector_and_clean_work_vector_not_adjusted(unsigned i0, indexed_vector<T> & work_vec, lp_settings & settings) {
    remove_zero_elements_and_set_data_on_existing_elements_not_adjusted(i0, work_vec, settings);
    // all non-zero elements in m_work_pivot_vector are new
    for (unsigned j : work_vec.m_index) {
        if (numeric_traits<T>::is_zero(work_vec[j])) {
            continue;
        }
        lean_assert(!settings.abs_val_is_smaller_than_drop_tolerance(work_vec[j]));
        add_new_element(i0, adjust_column(j), work_vec[j]);
        work_vec[j] = numeric_traits<T>::zero();
    }
    work_vec.m_index.clear();
    auto & row_vals = m_rows[i0];
    if (row_vals.size() == 0) {
        return false;
    }
    set_max_in_row(row_vals); // it helps to find larger pivots
    return true;
}

template <typename T, typename X>
void sparse_matrix<T, X>::remove_zero_elements_and_set_data_on_existing_elements(unsigned row) {
    auto & row_vals = m_rows[row];
    for (unsigned k = static_cast<unsigned>(row_vals.size()); k-- > 0;) {
        // we cannot simply run the iterator since we are removing elements from row_vals
        auto & row_el_iv = row_vals[k];
        unsigned j = row_el_iv.m_index;
        T & wj = m_work_pivot_vector[j];
        if (is_zero(wj)) {
            remove_element(row_vals, row_el_iv);
        } else {
            m_columns[j].m_values[row_el_iv.m_other].set_value(wj);
            row_el_iv.set_value(wj);
            wj = zero_of_type<T>();
        }
    }
}

// work_vec here has not adjusted column indices
template <typename T, typename X>
void sparse_matrix<T, X>::remove_zero_elements_and_set_data_on_existing_elements_not_adjusted(unsigned row, indexed_vector<T> & work_vec, lp_settings & settings) {
    auto & row_vals = m_rows[row];
    for (unsigned k = static_cast<unsigned>(row_vals.size()); k-- > 0;) {
        // we cannot simply run the iterator since we are removing elements from row_vals
        auto & row_el_iv = row_vals[k];
        unsigned j = row_el_iv.m_index;
        unsigned rj = adjust_column_inverse(j);
        T val = work_vec[rj];
        if (settings.abs_val_is_smaller_than_drop_tolerance(val)) {
            remove_element(row_vals, row_el_iv);
            lean_assert(numeric_traits<T>::is_zero(val));
        } else {
            m_columns[j].m_values[row_el_iv.m_other].set_value(row_el_iv.m_value = val);
            work_vec[rj] = numeric_traits<T>::zero();
        }
    }
}

// adding delta columns at the end of the matrix
template <typename T, typename X>
void sparse_matrix<T, X>::add_columns_at_the_end(unsigned delta) {
    for (unsigned i = 0; i < delta; i++) {
        col_header col_head;
        m_columns.push_back(col_head);
    }
    m_column_permutation.enlarge(delta);
}

template <typename T, typename X>
void sparse_matrix<T, X>::delete_column(int i) {
    lean_assert(i < dimension());
    for (auto cell = m_columns[i].m_head; cell != nullptr;) {
        auto next_cell = cell->m_down;
        kill_cell(cell);
        cell = next_cell;
    }
}

template <typename T, typename X>
void sparse_matrix<T, X>::divide_row_by_constant(unsigned i, const T & t, lp_settings & settings) {
    lean_assert(!settings.abs_val_is_smaller_than_zero_tolerance(t));
    i = adjust_row(i);
    for (auto & iv : m_rows[i]) {
        T & v = iv.m_value;
        v /= t;
        if (settings.abs_val_is_smaller_than_drop_tolerance(v)) {
            v = numeric_traits<T>::zero();
        }
        m_columns[iv.m_index].m_values[iv.m_other].set_value(v);
    }
}

// solving x * this = y, and putting the answer into y
// the matrix here has to be upper triangular
template <typename T, typename X>
void sparse_matrix<T, X>::solve_y_U(vector<T> & y) const { // works by rows
#ifdef LEAN_DEBUG
    // T * rs = clone_vector<T>(y, dimension());
#endif
    unsigned end = dimension();
    for (unsigned i = 0; i + 1 < end; i++) {
        // all y[i] has correct values already
        const T & yv = y[i];
        if (numeric_traits<T>::is_zero(yv)) continue;
        auto & mc = get_row_values(adjust_row(i));
        for (auto & c : mc) {
            unsigned col = adjust_column_inverse(c.m_index);
            if (col != i) {
                y[col] -= c.m_value * yv;
            }
        }
    }
#ifdef LEAN_DEBUG
    // dense_matrix<T, X> deb(*this);
    // T * clone_y = clone_vector<T>(y, dimension());
    // deb.apply_from_right(clone_y);
    // lean_assert(vectors_are_equal(rs, clone_y, dimension()));
    // delete [] clone_y;
    // delete [] rs;
#endif
}
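// A small worked example for solve_y_U, assuming the factor has a unit diagonal
// (as is_upper_triangular_and_maximums_are_set_correctly_in_rows checks):
// for U = [[1, 2], [0, 1]] and y = [3, 7], the i = 0 pass updates
// y[1] -= 2 * 3, giving y = [3, 1]; indeed [3, 1] * U = [3, 7].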
// solving x * this = y, and putting the answer into y
// the matrix here has to be upper triangular
template <typename T, typename X>
void sparse_matrix<T, X>::solve_y_U_indexed(indexed_vector<T> & y, const lp_settings & settings) {
#if 0 && LEAN_DEBUG
    vector<T> ycopy(y.m_data);
    if (numeric_traits<T>::precise() == false)
        solve_y_U(ycopy);
#endif
    vector<unsigned> sorted_active_columns;
    extend_and_sort_active_rows(y.m_index, sorted_active_columns);
    for (unsigned k = sorted_active_columns.size(); k-- > 0; ) {
        unsigned j = sorted_active_columns[k];
        auto & yj = y[j];
        for (auto & c : m_columns[adjust_column(j)].m_values) {
            unsigned i = adjust_row_inverse(c.m_index);
            if (i == j) continue;
            yj -= y[i] * c.m_value;
        }
    }
    y.m_index.clear();
    for (auto j : sorted_active_columns) {
        if (!settings.abs_val_is_smaller_than_drop_tolerance(y[j]))
            y.m_index.push_back(j);
        else if (!numeric_traits<T>::precise())
            y.m_data[j] = zero_of_type<T>();
    }
    lean_assert(y.is_OK());
#if 0 && LEAN_DEBUG
    if (numeric_traits<T>::precise() == false)
        lean_assert(vectors_are_equal(ycopy, y.m_data));
#endif
}

// fills the indices i such that y[i] can be non-zero,
// sorted so that the smaller indices come first
// void fill_reachable_indices(std::set<unsigned> & rset, T *y) {
//     std::queue<unsigned> q;
//     int m = dimension();
//     for (int i = m - 1; i >= 0; i--) {
//         if (!numeric_traits<T>::is_zero(y[i])){
//             for (cell * c = m_columns[adjust_column(i)].m_head; c != nullptr; c = c->m_down) {
//                 unsigned row = adjust_row_inverse(c->m_i);
//                 q.push(row);
//             }
//         }
//     }
//     while (!q.empty()) {
//         unsigned i = q.front();
//         q.pop();
//         for (cell * c = m_columns[adjust_column(i)].m_head; c != nullptr; c = c->m_down) {
//             unsigned row = adjust_row_inverse(c->m_i);
//             if (rset.find(row) == rset.end()){
//                 rset.insert(row);
//                 q.push(row);
//             }
//         }
//     }
// }

template <typename T, typename X>
template <typename L>
void sparse_matrix<T, X>::find_error_in_solution_U_y(vector<L>& y_orig, vector<L> & y) {
    unsigned i = dimension();
    while (i--) {
        y_orig[i] -= dot_product_with_row(i, y);
    }
}

template <typename T, typename X>
template <typename L>
void sparse_matrix<T, X>::find_error_in_solution_U_y_indexed(indexed_vector<L>& y_orig, indexed_vector<L> & y, const vector<unsigned>& sorted_active_rows) {
    for (unsigned i : sorted_active_rows)
        y_orig.add_value_at_index(i, -dot_product_with_row(i, y)); // cannot round up here!!!
    // y_orig can contain very small values
}
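// The functions below implement one step of iterative refinement: solve the
// triangular system once, compute the residual with find_error_in_solution_U_y*
// (so y_orig becomes y_orig - U * x), solve again for a correction, and add the
// correction to the solution. double_solve_U_y switches to the dense solve when
// the residual index is too dense for the indexed version to pay off.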
template <typename T, typename X>
template <typename L>
void sparse_matrix<T, X>::add_delta_to_solution(const vector<L>& del, vector<L> & y) {
    unsigned i = dimension();
    while (i--) {
        y[i] += del[i];
    }
}

template <typename T, typename X>
template <typename L>
void sparse_matrix<T, X>::add_delta_to_solution(const indexed_vector<L>& del, indexed_vector<L> & y) {
    // lean_assert(del.is_OK());
    // lean_assert(y.is_OK());
    for (auto i : del.m_index) {
        y.add_value_at_index(i, del[i]);
    }
}

template <typename T, typename X>
template <typename L>
void sparse_matrix<T, X>::double_solve_U_y(indexed_vector<L>& y, const lp_settings & settings) {
    lean_assert(y.is_OK());
    indexed_vector<L> y_orig(y); // copy y aside
    vector<unsigned> active_rows;
    solve_U_y_indexed_only(y, settings, active_rows);
    lean_assert(y.is_OK());
    find_error_in_solution_U_y_indexed(y_orig, y, active_rows);
    // y_orig contains the error now
    if (y_orig.m_index.size() * ratio_of_index_size_to_all_size<T>() < 32 * dimension()) {
        active_rows.clear();
        solve_U_y_indexed_only(y_orig, settings, active_rows);
        add_delta_to_solution(y_orig, y);
        y.clean_up();
    } else { // the dense version
        solve_U_y(y_orig.m_data);
        add_delta_to_solution(y_orig.m_data, y.m_data);
        y.restore_index_and_clean_from_data();
    }
    lean_assert(y.is_OK());
}

template <typename T, typename X>
template <typename L>
void sparse_matrix<T, X>::double_solve_U_y(vector<L>& y) {
    vector<L> y_orig(y); // copy y aside
    solve_U_y(y);
    find_error_in_solution_U_y(y_orig, y);
    // y_orig contains the error now
    solve_U_y(y_orig);
    add_delta_to_solution(y_orig, y);
}

// solving this * x = y, and putting the answer into y
// the matrix here has to be upper triangular
template <typename T, typename X>
template <typename L>
void sparse_matrix<T, X>::solve_U_y(vector<L> & y) { // it is a column-wise version
#ifdef LEAN_DEBUG
    // T * rs = clone_vector<T>(y, dimension());
#endif
    for (unsigned j = dimension(); j--; ) {
        const L & yj = y[j];
        if (is_zero(yj)) continue;
        for (const auto & iv : m_columns[adjust_column(j)].m_values) {
            unsigned i = adjust_row_inverse(iv.m_index);
            if (i != j) {
                y[i] -= iv.m_value * yj;
            }
        }
    }
#ifdef LEAN_DEBUG
    // dense_matrix<T, X> deb(*this);
    // T * clone_y = clone_vector<T>(y, dimension());
    // deb.apply_from_left(clone_y);
    // lean_assert(vectors_are_equal(rs, clone_y, dimension()));
#endif
}

template <typename T, typename X>
void sparse_matrix<T, X>::process_index_recursively_for_y_U(unsigned j, vector<unsigned> & sorted_active_rows) {
    lean_assert(m_processed[j] == false);
    m_processed[j] = true;
    auto & row = m_rows[adjust_row(j)];
    for (auto & c : row) {
        unsigned i = adjust_column_inverse(c.m_index);
        if (i == j) continue;
        if (!m_processed[i]) {
            process_index_recursively_for_y_U(i, sorted_active_rows);
        }
    }
    sorted_active_rows.push_back(j);
}

template <typename T, typename X>
void sparse_matrix<T, X>::process_column_recursively(unsigned j, vector<unsigned> & sorted_active_rows) {
    lean_assert(m_processed[j] == false);
    auto & mc = m_columns[adjust_column(j)].m_values;
    for (auto & iv : mc) {
        unsigned i = adjust_row_inverse(iv.m_index);
        if (i == j) continue;
        if (!m_processed[i]) {
            process_column_recursively(i, sorted_active_rows);
        }
    }
    m_processed[j] = true;
    sorted_active_rows.push_back(j);
}

template <typename T, typename X>
void sparse_matrix<T, X>::create_graph_G(const vector<unsigned> & index_or_right_side, vector<unsigned> & sorted_active_rows) {
    for (auto i : index_or_right_side) {
        if (m_processed[i]) continue;
        process_column_recursively(i, sorted_active_rows);
    }
    for (auto i : sorted_active_rows) {
        m_processed[i] = false;
    }
}

template <typename T, typename X>
void sparse_matrix<T, X>::extend_and_sort_active_rows(const vector<unsigned> & index_or_right_side, vector<unsigned> & sorted_active_rows) {
    for (auto i : index_or_right_side) {
        if (m_processed[i]) continue;
        process_index_recursively_for_y_U(i, sorted_active_rows);
    }
    for (auto i : sorted_active_rows) {
        m_processed[i] = false;
    }
}
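// process_column_recursively / process_index_recursively_for_y_U perform a DFS
// from the nonzero positions of the right-hand side, so sorted_active_rows
// comes out in topological order of the dependencies among the unknowns.
// Traversing it backwards, as solve_U_y_indexed_only does below, touches only
// the entries that can become nonzero - the standard trick for sparse
// triangular solves with a sparse right-hand side.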
template <typename T, typename X>
template <typename L>
void sparse_matrix<T, X>::solve_U_y_indexed_only(indexed_vector<L> & y, const lp_settings & settings, vector<unsigned> & sorted_active_rows) { // it is a column-wise version
    create_graph_G(y.m_index, sorted_active_rows);
    for (auto k = sorted_active_rows.size(); k-- > 0;) {
        unsigned j = sorted_active_rows[k];
        const L & yj = y[j];
        if (is_zero(yj)) continue;
        auto & mc = m_columns[adjust_column(j)].m_values;
        for (auto & iv : mc) {
            unsigned i = adjust_row_inverse(iv.m_index);
            if (i != j) {
                y[i] -= iv.m_value * yj;
            }
        }
    }
    y.m_index.clear();
    for (auto j : sorted_active_rows) {
        if (!settings.abs_val_is_smaller_than_drop_tolerance(y[j]))
            y.m_index.push_back(j);
        else if (!numeric_traits<L>::precise())
            y[j] = zero_of_type<L>();
    }
    lean_assert(y.is_OK());
#ifdef LEAN_DEBUG
    // dense_matrix<T, X> deb(this);
    // vector<L> clone_y(y.m_data);
    // deb.apply_from_left(clone_y);
    // lean_assert(vectors_are_equal(rs, clone_y));
#endif
}

template <typename T, typename X>
template <typename L>
L sparse_matrix<T, X>::dot_product_with_row(unsigned row, const vector<L> & y) const {
    L ret = zero_of_type<L>();
    auto & mc = get_row_values(adjust_row(row));
    for (auto & c : mc) {
        unsigned col = m_column_permutation[c.m_index];
        ret += c.m_value * y[col];
    }
    return ret;
}

template <typename T, typename X>
template <typename L>
L sparse_matrix<T, X>::dot_product_with_row(unsigned row, const indexed_vector<L> & y) const {
    L ret = zero_of_type<L>();
    auto & mc = get_row_values(adjust_row(row));
    for (auto & c : mc) {
        unsigned col = m_column_permutation[c.m_index];
        ret += c.m_value * y[col];
    }
    return ret;
}

template <typename T, typename X>
unsigned sparse_matrix<T, X>::get_number_of_nonzeroes() const {
    unsigned ret = 0;
    for (unsigned i = dimension(); i--; ) {
        ret += number_of_non_zeroes_in_row(i);
    }
    return ret;
}

template <typename T, typename X>
bool sparse_matrix<T, X>::get_non_zero_column_in_row(unsigned i, unsigned *j) const {
    // go over the i-th row
    auto & mc = get_row_values(adjust_row(i));
    if (mc.size() > 0) {
        *j = m_column_permutation[mc[0].m_index];
        return true;
    }
    return false;
}

template <typename T, typename X>
void sparse_matrix<T, X>::remove_element_that_is_not_in_w(vector<indexed_value<T>> & column_vals, indexed_value<T> & col_el_iv) {
    auto & row_chunk = m_rows[col_el_iv.m_index];
    indexed_value<T> & row_el_iv = row_chunk[col_el_iv.m_other];
    unsigned index_in_row = col_el_iv.m_other;
    remove_element(row_chunk, col_el_iv.m_other, column_vals, row_el_iv.m_other);
    if (index_in_row == 0)
        set_max_in_row(row_chunk);
}

// w contains the new column
// the old column inside of the matrix has not been changed yet
template <typename T, typename X>
void sparse_matrix<T, X>::remove_elements_that_are_not_in_w_and_update_common_elements(unsigned column_to_replace, indexed_vector<T> & w) {
    // column_vals represents the old column
    auto & column_vals = m_columns[column_to_replace].m_values;
    for (unsigned k = static_cast<unsigned>(column_vals.size()); k-- > 0;) {
        indexed_value<T> & col_el_iv = column_vals[k];
        unsigned i = col_el_iv.m_index;
        T & w_data_at_i = w[adjust_row_inverse(i)];
        if (numeric_traits<T>::is_zero(w_data_at_i)) {
            remove_element_that_is_not_in_w(column_vals, col_el_iv);
        } else {
            auto & row_chunk = m_rows[i];
            unsigned index_in_row = col_el_iv.m_other;
            if (index_in_row == 0) {
                bool look_for_max = abs(w_data_at_i) < abs(row_chunk[0].m_value);
                row_chunk[0].set_value(col_el_iv.m_value = w_data_at_i);
                if (look_for_max)
                    set_max_in_row(i);
            } else {
                row_chunk[index_in_row].set_value(col_el_iv.m_value = w_data_at_i);
                if (abs(w_data_at_i) > abs(row_chunk[0].m_value))
                    put_max_index_to_0(row_chunk, index_in_row);
            }
            w_data_at_i = numeric_traits<T>::zero();
        }
    }
}
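// replace_column (below) swaps one column of the factor for the new column w in
// two passes: remove_elements_that_are_not_in_w_and_update_common_elements
// (above) deletes stale entries and overwrites the common ones in place, while
// add_new_elements_of_w_and_clear_w appends the entries of w that had no
// counterpart, using the O(1) append in add_new_element.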
template <typename T, typename X>
void sparse_matrix<T, X>::add_new_element(unsigned row, unsigned col, const T& val) {
    auto & row_vals = m_rows[row];
    auto & col_vals = m_columns[col].m_values;
    unsigned row_el_offs = static_cast<unsigned>(row_vals.size());
    unsigned col_el_offs = static_cast<unsigned>(col_vals.size());
    row_vals.push_back(indexed_value<T>(val, col, col_el_offs));
    col_vals.push_back(indexed_value<T>(val, row, row_el_offs));
    m_n_of_active_elems++;
}

// w contains the "rest" of the new column; all common elements of w and the old column have been zeroed
// the old column inside of the matrix has not been changed yet
template <typename T, typename X>
void sparse_matrix<T, X>::add_new_elements_of_w_and_clear_w(unsigned column_to_replace, indexed_vector<T> & w, lp_settings & settings) {
    for (unsigned i : w.m_index) {
        T w_at_i = w[i];
        if (numeric_traits<T>::is_zero(w_at_i)) continue; // was dealt with already
        if (!settings.abs_val_is_smaller_than_drop_tolerance(w_at_i)) {
            unsigned ai = adjust_row(i);
            add_new_element(ai, column_to_replace, w_at_i);
            auto & row_chunk = m_rows[ai];
            lean_assert(row_chunk.size() > 0);
            if (abs(w_at_i) > abs(row_chunk[0].m_value))
                put_max_index_to_0(row_chunk, static_cast<unsigned>(row_chunk.size()) - 1);
        }
        w[i] = numeric_traits<T>::zero();
    }
    w.m_index.clear();
}

template <typename T, typename X>
void sparse_matrix<T, X>::replace_column(unsigned column_to_replace, indexed_vector<T> & w, lp_settings & settings) {
    column_to_replace = adjust_column(column_to_replace);
    remove_elements_that_are_not_in_w_and_update_common_elements(column_to_replace, w);
    add_new_elements_of_w_and_clear_w(column_to_replace, w, settings);
}

template <typename T, typename X>
unsigned sparse_matrix<T, X>::pivot_score(unsigned i, unsigned j) {
    // The Markovitz number (rnz - 1) * (cnz - 1) is the maximal number of
    // new non-zeroes we can obtain after the pivoting.
    // In addition we will get another cnz - 1 elements in the eta matrix created for this pivot,
    // which gives rnz * (cnz - 1). For example, the score is 0 for a column singleton,
    // but not for a row singleton (which is not a column singleton).
    auto col_header = m_columns[j];
    return static_cast<unsigned>(get_row_values(i).size() * (col_header.m_values.size() - col_header.m_shortened_markovitz - 1));
}

template <typename T, typename X>
void sparse_matrix<T, X>::enqueue_domain_into_pivot_queue() {
    lean_assert(m_pivot_queue.size() == 0);
    for (unsigned i = 0; i < dimension(); i++) {
        auto & rh = m_rows[i];
        unsigned rnz = static_cast<unsigned>(rh.size());
        for (auto iv : rh) {
            unsigned j = iv.m_index;
            m_pivot_queue.enqueue(i, j, rnz * (static_cast<unsigned>(m_columns[j].m_values.size()) - 1));
        }
    }
}

template <typename T, typename X>
void sparse_matrix<T, X>::set_max_in_rows() {
    unsigned i = dimension();
    while (i--)
        set_max_in_row(i);
}

template <typename T, typename X>
void sparse_matrix<T, X>::zero_shortened_markovitz_numbers() {
    for (auto & ch : m_columns)
        ch.zero_shortened_markovitz();
}

template <typename T, typename X>
void sparse_matrix<T, X>::prepare_for_factorization() {
    zero_shortened_markovitz_numbers();
    set_max_in_rows();
    enqueue_domain_into_pivot_queue();
}

template <typename T, typename X>
void sparse_matrix<T, X>::recover_pivot_queue(vector<std::pair<unsigned, unsigned>> & rejected_pivots) {
    for (auto p : rejected_pivots) {
        m_pivot_queue.enqueue(p.first, p.second, pivot_score(p.first, p.second));
    }
}

template <typename T, typename X>
int sparse_matrix<T, X>::elem_is_too_small(unsigned i, unsigned j, int c_partial_pivoting) {
    auto & row_chunk = m_rows[i];
    if (j == row_chunk[0].m_index) {
        return 0; // the max element is at the head
    }
    T max = abs(row_chunk[0].m_value);
    for (unsigned k = 1; k < row_chunk.size(); k++) {
        auto & iv = row_chunk[k];
        if (iv.m_index == j)
            return abs(iv.m_value) * c_partial_pivoting < max ? 1 : 0;
    }
    return 2; // the element became zero but it still sits in the active pivots?
}
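// elem_is_too_small return codes: 0 - the element passes the threshold test
// |a(i,j)| * c_partial_pivoting >= max|row i|, 1 - it fails the test, 2 - the
// pair is no longer present in the matrix. get_pivot_for_column re-enqueues the
// rejected candidates with code 1 but drops those with code 2.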
template <typename T, typename X>
bool sparse_matrix<T, X>::remove_row_from_active_pivots_and_shorten_columns(unsigned row) {
    unsigned arow = adjust_row(row);
    for (auto & iv : m_rows[arow]) {
        m_pivot_queue.remove(arow, iv.m_index);
        m_n_of_active_elems--; // the value is correct only when refactoring
        if (adjust_column_inverse(iv.m_index) <= row)
            continue; // this column will be removed anyway
        auto & col = m_columns[iv.m_index];
        col.shorten_markovich_by_one();
        if (col.m_values.size() <= col.m_shortened_markovitz)
            return false; // got a zero column
    }
    return true;
}

template <typename T, typename X>
void sparse_matrix<T, X>::remove_pivot_column(unsigned row) {
    unsigned acol = adjust_column(row);
    for (const auto & iv : m_columns[acol].m_values)
        if (adjust_row_inverse(iv.m_index) >= row)
            m_pivot_queue.remove(iv.m_index, acol);
}

template <typename T, typename X>
void sparse_matrix<T, X>::update_active_pivots(unsigned row) {
    unsigned arow = adjust_row(row);
    for (const auto & iv : m_rows[arow]) {
        col_header & ch = m_columns[iv.m_index];
        int cols = static_cast<int>(ch.m_values.size()) - ch.m_shortened_markovitz - 1;
        lean_assert(cols >= 0);
        for (const auto & ivc : ch.m_values) {
            unsigned i = ivc.m_index;
            if (adjust_row_inverse(i) <= row) continue; // the i is not an active row
            m_pivot_queue.enqueue(i, iv.m_index, static_cast<unsigned>(m_rows[i].size()) * cols);
        }
    }
}

template <typename T, typename X>
bool sparse_matrix<T, X>::shorten_active_matrix(unsigned row, eta_matrix<T, X> *eta_matrix) {
    if (!remove_row_from_active_pivots_and_shorten_columns(row))
        return false;
    remove_pivot_column(row);
    // need to know the max priority of the queue here
    update_active_pivots(row);
    if (eta_matrix == nullptr) return true;
    // it looks like double work, but the pivot scores have changed for all rows
    // touched by eta_matrix
    for (auto & it : eta_matrix->m_column_vector.m_data) {
        unsigned row = adjust_row(it.first);
        const auto & row_values = m_rows[row];
        unsigned rnz = static_cast<unsigned>(row_values.size());
        for (auto & iv : row_values) {
            const col_header & ch = m_columns[iv.m_index];
            int cnz = static_cast<int>(ch.m_values.size()) - ch.m_shortened_markovitz - 1;
            lean_assert(cnz >= 0);
            m_pivot_queue.enqueue(row, iv.m_index, rnz * cnz);
        }
    }
    return true;
}

template <typename T, typename X>
unsigned sparse_matrix<T, X>::pivot_score_without_shortened_counters(unsigned i, unsigned j, unsigned k) {
    auto & cols = m_columns[j].m_values;
    unsigned cnz = cols.size();
    for (auto & iv : cols) {
        if (adjust_row_inverse(iv.m_index) < k)
            cnz--;
    }
    lean_assert(cnz > 0);
    return m_rows[i].size() * (cnz - 1);
}

#ifdef LEAN_DEBUG
template <typename T, typename X>
bool sparse_matrix<T, X>::can_improve_score_for_row(unsigned row, unsigned score, T const & c_partial_pivoting, unsigned k) {
    unsigned arow = adjust_row(row);
    auto & row_vals = m_rows[arow];
    auto & begin_iv = row_vals[0];
    T row_max = abs(begin_iv.m_value);
    lean_assert(adjust_column_inverse(begin_iv.m_index) >= k);
    if (pivot_score_without_shortened_counters(arow, begin_iv.m_index, k) < score) {
        print_active_matrix(k);
        return true;
    }
    for (unsigned jj = 1; jj < row_vals.size(); jj++) {
        auto & iv = row_vals[jj];
        lean_assert(adjust_column_inverse(iv.m_index) >= k);
        lean_assert(abs(iv.m_value) <= row_max);
        if (c_partial_pivoting * abs(iv.m_value) < row_max) continue;
        if (pivot_score_without_shortened_counters(arow, iv.m_index, k) < score) {
            print_active_matrix(k);
            return true;
        }
    }
    return false;
}
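// really_best_pivot is a LEAN_DEBUG-only exhaustive check: it asserts that no
// row of the active submatrix offers a pivot with a better Markovitz score than
// the candidate (i, j) popped from the queue.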
template <typename T, typename X>
bool sparse_matrix<T, X>::really_best_pivot(unsigned i, unsigned j, T const & c_partial_pivoting, unsigned k) {
    unsigned queue_pivot_score = pivot_score_without_shortened_counters(i, j, k);
    for (unsigned ii = k; ii < dimension(); ii++) {
        lean_assert(!can_improve_score_for_row(ii, queue_pivot_score, c_partial_pivoting, k));
    }
    return true;
}

template <typename T, typename X>
void sparse_matrix<T, X>::print_active_matrix(unsigned k, std::ostream & out) {
    out << "active matrix for k = " << k << std::endl;
    if (k >= dimension()) {
        out << "empty" << std::endl;
        return;
    }
    unsigned dim = dimension() - k;
    dense_matrix<T, X> b(dim, dim);
    for (unsigned i = 0; i < dim; i++)
        for (unsigned j = 0; j < dim; j++)
            b.set_elem(i, j, zero_of_type<T>());
    for (unsigned i = k; i < dimension(); i++) {
        unsigned col = adjust_column(i);
        for (auto & iv : get_column_values(col)) {
            unsigned row = iv.m_index;
            unsigned row_ex = this->adjust_row_inverse(row);
            if (row_ex < k) continue;
            auto v = this->get_not_adjusted(row, col);
            b.set_elem(row_ex - k, i - k, v);
        }
    }
    print_matrix(b, out);
}

template <typename T, typename X>
bool sparse_matrix<T, X>::pivot_queue_is_correct_for_row(unsigned i, unsigned k) {
    unsigned arow = adjust_row(i);
    for (auto & iv : m_rows[arow]) {
        lean_assert(pivot_score_without_shortened_counters(arow, iv.m_index, k + 1) ==
                    m_pivot_queue.get_priority(arow, iv.m_index));
    }
    return true;
}

template <typename T, typename X>
bool sparse_matrix<T, X>::pivot_queue_is_correct_after_pivoting(int k) {
    for (unsigned i = k + 1; i < dimension(); i++)
        lean_assert(pivot_queue_is_correct_for_row(i, k));
    lean_assert(m_pivot_queue.is_correct());
    return true;
}
#endif

template <typename T, typename X>
bool sparse_matrix<T, X>::get_pivot_for_column(unsigned & i, unsigned & j, int c_partial_pivoting, unsigned k) {
    vector<std::pair<unsigned, unsigned>> pivots_candidates_that_are_too_small;
    while (!m_pivot_queue.is_empty()) {
        m_pivot_queue.dequeue(i, j);
        unsigned i_inv = adjust_row_inverse(i);
        if (i_inv < k) continue;
        unsigned j_inv = adjust_column_inverse(j);
        if (j_inv < k) continue;
        int _small = elem_is_too_small(i, j, c_partial_pivoting);
        if (!_small) {
#ifdef LEAN_DEBUG
            // if (!really_best_pivot(i, j, c_partial_pivoting, k)) {
            //     print_active_matrix(k);
            //     lean_assert(false);
            // }
#endif
            recover_pivot_queue(pivots_candidates_that_are_too_small);
            i = i_inv;
            j = j_inv;
            return true;
        }
        if (_small != 2) { // 2 means that the pair is not in the matrix
            pivots_candidates_that_are_too_small.push_back(std::make_pair(i, j));
        }
    }
    recover_pivot_queue(pivots_candidates_that_are_too_small);
    return false;
}

template <typename T, typename X>
bool sparse_matrix<T, X>::elem_is_too_small(vector<indexed_value<T>> & row_chunk, indexed_value<T> & iv, int c_partial_pivoting) {
    if (&iv == &row_chunk[0]) {
        return false; // the max element is at the head
    }
    T val = abs(iv.m_value);
    T max = abs(row_chunk[0].m_value);
    return val * c_partial_pivoting < max;
}

template <typename T, typename X>
bool sparse_matrix<T, X>::shorten_columns_by_pivot_row(unsigned i, unsigned pivot_column) {
    vector<indexed_value<T>> & row_chunk = get_row_values(i);
    for (indexed_value<T> & iv : row_chunk) {
        unsigned j = iv.m_index;
        if (j == pivot_column) {
            lean_assert(!col_is_active(j));
            continue;
        }
        m_columns[j].shorten_markovich_by_one();
        if (m_columns[j].m_shortened_markovitz >= get_column_values(j).size()) { // got the zero column under the row!
            return false;
        }
    }
    return true;
}
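// fill_eta_matrix (below) extracts the elimination step for column j as an eta
// matrix: an identity except in column j, which receives the negated
// subdiagonal entries of the column scaled by the pivot (see
// divide_by_diagonal_element). When the column is already a unit column no eta
// matrix is needed, and *eta is set to nullptr.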
template <typename T, typename X>
bool sparse_matrix<T, X>::fill_eta_matrix(unsigned j, eta_matrix<T, X> ** eta) {
    const vector<indexed_value<T>> & col_chunk = get_column_values(adjust_column(j));
    bool is_unit = true;
    for (const auto & iv : col_chunk) {
        unsigned i = adjust_row_inverse(iv.m_index);
        if (i > j) {
            is_unit = false;
            break;
        }
        if (i == j && iv.m_value != 1) {
            is_unit = false;
            break;
        }
    }
    if (is_unit) {
        *eta = nullptr;
        return true;
    }
#ifdef LEAN_DEBUG
    *eta = new eta_matrix<T, X>(j, dimension());
#else
    *eta = new eta_matrix<T, X>(j);
#endif
    for (const auto & iv : col_chunk) {
        unsigned i = adjust_row_inverse(iv.m_index);
        if (i < j) {
            continue;
        }
        if (i > j) {
            (*eta)->push_back(i, - iv.m_value);
        } else { // i == j
            if (!(*eta)->set_diagonal_element(iv.m_value)) {
                delete *eta;
                *eta = nullptr;
                return false;
            }
        }
    }
    (*eta)->divide_by_diagonal_element();
    return true;
}

#ifdef LEAN_DEBUG
template <typename T, typename X>
bool sparse_matrix<T, X>::is_upper_triangular_and_maximums_are_set_correctly_in_rows(lp_settings & settings) const {
    for (unsigned i = 0; i < dimension(); i++) {
        vector<indexed_value<T>> const & row_chunk = get_row_values(i);
        lean_assert(row_chunk.size());
        T const & max = abs(row_chunk[0].m_value);
        unsigned ai = adjust_row_inverse(i);
        for (auto & iv : row_chunk) {
            lean_assert(abs(iv.m_value) <= max);
            unsigned aj = adjust_column_inverse(iv.m_index);
            if (!(ai <= aj || numeric_traits<T>::is_zero(iv.m_value)))
                return false;
            if (aj == ai) {
                if (iv.m_value != 1) {
                    // std::cout << "value at diagonal = " << iv.m_value << std::endl;
                    return false;
                }
            }
            if (settings.abs_val_is_smaller_than_drop_tolerance(iv.m_value) && (!is_zero(iv.m_value)))
                return false;
        }
    }
    return true;
}

template <typename T, typename X>
bool sparse_matrix<T, X>::is_upper_triangular_until(unsigned k) const {
    for (unsigned j = 0; j < dimension() && j < k; j++) {
        unsigned aj = adjust_column(j);
        auto & col = get_column_values(aj);
        for (auto & iv : col) {
            unsigned row = adjust_row_inverse(iv.m_index);
            if (row > j)
                return false;
        }
    }
    return true;
}

template <typename T, typename X>
void sparse_matrix<T, X>::check_column_vs_rows(unsigned col) {
    auto & mc = get_column_values(col);
    for (indexed_value<T> & column_iv : mc) {
        indexed_value<T> & row_iv = column_iv_other(column_iv);
        if (row_iv.m_index != col) {
            // std::cout << "m_other in row does not belong to column " << col << ", but to column " << row_iv.m_index << std::endl;
            lean_assert(false);
        }
        if (& row_iv_other(row_iv) != & column_iv) {
            // std::cout << "row and col do not point to each other" << std::endl;
            lean_assert(false);
        }
        if (row_iv.m_value != column_iv.m_value) {
            // std::cout << "the data from col " << col << " for row " << column_iv.m_index << " is different in the column " << std::endl;
            // std::cout << "in the col it is " << column_iv.m_value << ", but in the row it is " << row_iv.m_value << std::endl;
            lean_assert(false);
        }
    }
}

template <typename T, typename X>
void sparse_matrix<T, X>::check_row_vs_columns(unsigned row) {
    auto & mc = get_row_values(row);
    for (indexed_value<T> & row_iv : mc) {
        indexed_value<T> & column_iv = row_iv_other(row_iv);
        if (column_iv.m_index != row) {
            // std::cout << "col_iv does not point to correct row " << row << " but to " << column_iv.m_index << std::endl;
            lean_assert(false);
        }
        if (& row_iv != & column_iv_other(column_iv)) {
            // std::cout << "row and col do not point to each other" << std::endl;
            lean_assert(false);
        }
        if (row_iv.m_value != column_iv.m_value) {
            // std::cout << "the data from col " << column_iv.m_index << " for row " << row << " is different in the column " << std::endl;
            // std::cout << "in the col it is " << column_iv.m_value << ", but in the row it is " << row_iv.m_value << std::endl;
            lean_assert(false);
        }
    }
}
template <typename T, typename X>
void sparse_matrix<T, X>::check_rows_vs_columns() {
    for (unsigned i = 0; i < dimension(); i++) {
        check_row_vs_columns(i);
    }
}

template <typename T, typename X>
void sparse_matrix<T, X>::check_columns_vs_rows() {
    for (unsigned i = 0; i < dimension(); i++) {
        check_column_vs_rows(i);
    }
}

template <typename T, typename X>
void sparse_matrix<T, X>::check_matrix() {
    check_rows_vs_columns();
    check_columns_vs_rows();
}
#endif
}