diff --git a/CMakeLists.txt b/CMakeLists.txt
index d1cfa8ee3..603e86ee1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4,7 +4,7 @@ cmake_minimum_required(VERSION 3.16)
 set(CMAKE_USER_MAKE_RULES_OVERRIDE_CXX "${CMAKE_CURRENT_SOURCE_DIR}/cmake/cxx_compiler_flags_overrides.cmake")
 
 # Read version from VERSION.txt file
-file(READ "${CMAKE_CURRENT_SOURCE_DIR}/VERSION.txt" Z3_VERSION_FROM_FILE)
+file(READ "${CMAKE_CURRENT_SOURCE_DIR}/scripts/VERSION.txt" Z3_VERSION_FROM_FILE)
 string(STRIP "${Z3_VERSION_FROM_FILE}" Z3_VERSION_FROM_FILE)
 
 project(Z3 VERSION ${Z3_VERSION_FROM_FILE} LANGUAGES CXX)
diff --git a/MODULE.bazel b/MODULE.bazel
index c368221da..48848d27e 100644
--- a/MODULE.bazel
+++ b/MODULE.bazel
@@ -1,6 +1,6 @@
 module(
     name = "z3",
-    version = "4.16.0", # TODO: Read from VERSION.txt - currently manual sync required
+    version = "4.15.4", # TODO: Read from VERSION.txt - currently manual sync required
     bazel_compatibility = [">=7.0.0"],
 )
diff --git a/PARALLEL_PROJECT_NOTES.md b/PARALLEL_PROJECT_NOTES.md
index 215d9bda0..9b53745c5 100644
--- a/PARALLEL_PROJECT_NOTES.md
+++ b/PARALLEL_PROJECT_NOTES.md
@@ -302,6 +302,13 @@ threads-4-cube
 threads-4-cube-shareconflicts
 -T:30 smt.threads=4 tactic.default_tactic=smt smt_parallel.never_cube=false smt_parallel.frugal_cube_only=false smt_parallel.share_conflicts=true smt_parallel.share_units=false
+
+threads-4-cube-maxdepth-10
+-T:30 smt.threads=4 tactic.default_tactic=smt smt_parallel.share_conflicts=false smt_parallel.share_units=false smt_parallel.max_cube_depth=10
+
+threads-4-cube-shareconflicts
+-T:30 smt.threads=4 tactic.default_tactic=smt smt_parallel.share_conflicts=true smt_parallel.share_units=false smt_parallel.frugal_depth_splitting_only=true
+
 
 Ideas for other knobs that can be tested
diff --git a/README-CMake.md b/README-CMake.md
index e182a7f94..bf06116fc 100644
--- a/README-CMake.md
+++ b/README-CMake.md
@@ -119,6 +119,30 @@ target_link_libraries(yourTarget libz3)
 ```
 
 Note that this is `libz3` not `z3` (`libz3` refers to the library target from `src/CMakeLists.txt`).
 
+#### Using system-installed Z3
+
+If you have Z3 installed on your system (e.g., via a package manager or by building and installing Z3 yourself), you can use CMake's `find_package` to locate it:
+
+```cmake
+find_package(Z3 REQUIRED CONFIG)
+```
+
+Once found, you can use the Z3 include directories and libraries:
+
+```cmake
+# For C projects
+target_include_directories(yourTarget PRIVATE ${Z3_C_INCLUDE_DIRS})
+target_link_libraries(yourTarget PRIVATE ${Z3_LIBRARIES})
+
+# For C++ projects
+target_include_directories(yourTarget PRIVATE ${Z3_CXX_INCLUDE_DIRS})
+target_link_libraries(yourTarget PRIVATE ${Z3_LIBRARIES})
+```
+
+The `find_package(Z3 CONFIG)` approach uses Z3's provided `Z3Config.cmake` file, which is installed to a standard location (typically `/lib/cmake/z3/`). If CMake cannot automatically find Z3, you can help it by setting `-DZ3_DIR=<path>`, where `<path>` is the directory containing the `Z3Config.cmake` file.
+
+**Note**: This approach requires that Z3 was built and installed using CMake. Z3 installations from the Python build system may not provide the necessary CMake configuration files.
+
 ### Ninja
 
diff --git a/VERSION.txt b/scripts/VERSION.txt
similarity index 100%
rename from VERSION.txt
rename to scripts/VERSION.txt
diff --git a/scripts/mk_nuget_task.py b/scripts/mk_nuget_task.py
index b85ec4c8e..d854adad4 100644
--- a/scripts/mk_nuget_task.py
+++ b/scripts/mk_nuget_task.py
@@ -103,7 +103,7 @@ def mk_targets(source_root):
 def mk_icon(source_root):
     mk_dir("out/content")
     shutil.copy(f"{source_root}/resources/icon.jpg", "out/content/icon.jpg")
-#    shutil.copy(f"{source_root}/src/api/dotnet/README.md", "out/content/README.md")
+    shutil.copy(f"{source_root}/src/api/dotnet/README.md", "out/content/README.md")
@@ -124,6 +124,7 @@ Linux Dependencies:
         © Microsoft Corporation. All rights reserved.
         smt constraint solver theorem prover
         content/icon.jpg
+        content/README.md
         https://github.com/Z3Prover/z3
         MIT
diff --git a/scripts/mk_project.py b/scripts/mk_project.py
index da73e2daf..7b4d444ea 100644
--- a/scripts/mk_project.py
+++ b/scripts/mk_project.py
@@ -9,7 +9,7 @@ from mk_util import *
 
 def init_version():
     # Read version from VERSION.txt file
-    version_file_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'VERSION.txt')
+    version_file_path = os.path.join(os.path.dirname(__file__), 'VERSION.txt')
     try:
         with open(version_file_path, 'r') as f:
             version_str = f.read().strip()
diff --git a/scripts/nightly.yaml b/scripts/nightly.yaml
index 5323f8bc0..d051cb0ee 100644
--- a/scripts/nightly.yaml
+++ b/scripts/nightly.yaml
@@ -1,8 +1,8 @@
 variables:
   # Version components read from VERSION.txt (updated manually when VERSION.txt changes)
   Major: '4'
-  Minor: '16'
-  Patch: '0'
+  Minor: '15'
+  Patch: '4'
   ReleaseVersion: $(Major).$(Minor).$(Patch)
   AssemblyVersion: $(Major).$(Minor).$(Patch).$(Build.BuildId)
   NightlyVersion: $(AssemblyVersion)-$(Build.buildId)
@@ -254,9 +254,9 @@ stages:
     inputs:
       artifact: 'MacArm64'
       path: $(Agent.TempDirectory)\package
-  - task: NuGetToolInstaller@0
+  - task: NuGetToolInstaller@1
     inputs:
-      versionSpec: 5.x
+      versionSpec: 6.x
       checkLatest: false
   - task: PythonScript@0
     displayName: 'Python: assemble files'
@@ -302,9 +302,9 @@ stages:
     inputs:
       artifact: 'WindowsBuild-x86'
       path: $(Agent.TempDirectory)\package
-  - task: NuGetToolInstaller@0
+  - task: NuGetToolInstaller@1
     inputs:
-      versionSpec: 5.x
+      versionSpec: 6.x
       checkLatest: false
   - task: PythonScript@0
     displayName: 'Python: assemble files'
diff --git a/scripts/release.yml b/scripts/release.yml
index 2a6cd363d..b26539c35 100644
--- a/scripts/release.yml
+++ b/scripts/release.yml
@@ -6,7 +6,7 @@ trigger: none
 
 variables:
-  ReleaseVersion: '4.16.0' # TODO: Auto-read from VERSION.txt when Azure DevOps supports it better
+  ReleaseVersion: '4.15.4' # TODO: Auto-read from VERSION.txt when Azure DevOps supports it better
 
 stages:
@@ -261,9 +261,9 @@ stages:
       artifact: 'MacArm64'
       path: $(Agent.TempDirectory)\package
-  - task: NuGetToolInstaller@0
+  - task: NuGetToolInstaller@1
     inputs:
-      versionSpec: 5.x
+      versionSpec: 6.x
       checkLatest: false
   - task: PythonScript@0
     displayName: 'Python: assemble files'
@@ -305,9 +305,9 @@ stages:
     inputs:
       artifact: 'WindowsBuild-x86'
       path: $(Agent.TempDirectory)\package
-  - task: NuGetToolInstaller@0
+  - task: NuGetToolInstaller@1
     inputs:
-      versionSpec: 5.x
+      versionSpec: 6.x
       checkLatest: false
   - task: PythonScript@0
     displayName: 'Python: assemble files'
@@ -471,7 +471,7 @@ stages:
 
 - job: NuGetPublish
-  condition: eq(1,0)
+  condition: eq(1,1)
   displayName: "Publish to NuGet.org"
   steps:
   - task: DownloadPipelineArtifact@2
     inputs:
       artifact: 'NuGetPackage'
      path: $(Agent.TempDirectory)
-  - task: NuGetToolInstaller@0
+  - task: NuGetToolInstaller@1
    inputs:
-      versionSpec: 5.x
+      versionSpec: 6.x
      checkLatest: false
  - task: NuGetCommand@2
    inputs:
diff --git a/scripts/update_version.py b/scripts/update_version.py
index fcc3c5576..f33b20655 100755
--- a/scripts/update_version.py
+++ b/scripts/update_version.py
@@ -16,7 +16,7 @@ import sys
 def read_version():
     """Read version from VERSION.txt file."""
     script_dir = os.path.dirname(os.path.abspath(__file__))
-    version_file = os.path.join(os.path.dirname(script_dir), 'VERSION.txt')
+    version_file = os.path.join(script_dir, 'VERSION.txt')
 
     try:
         with open(version_file, 'r') as f:
diff --git a/src/api/julia/CMakeLists.txt b/src/api/julia/CMakeLists.txt
index fe27caa95..952b33fb5 100644
--- a/src/api/julia/CMakeLists.txt
+++ b/src/api/julia/CMakeLists.txt
@@ -1,5 +1,32 @@
 find_package(JlCxx REQUIRED)
 
+# Check for Windows MSVC + MinGW library compatibility issues
+if(WIN32 AND CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+  # Get the JlCxx library path to check its format
+  get_target_property(JLCXX_LIB_PATH JlCxx::cxxwrap_julia IMPORTED_LOCATION)
+  if(NOT JLCXX_LIB_PATH)
+    get_target_property(JLCXX_LIB_PATH JlCxx::cxxwrap_julia IMPORTED_LOCATION_RELEASE)
+  endif()
+  if(NOT JLCXX_LIB_PATH)
+    get_target_property(JLCXX_LIB_PATH JlCxx::cxxwrap_julia IMPORTED_IMPLIB)
+  endif()
+  if(NOT JLCXX_LIB_PATH)
+    get_target_property(JLCXX_LIB_PATH JlCxx::cxxwrap_julia IMPORTED_IMPLIB_RELEASE)
+  endif()
+
+  if(JLCXX_LIB_PATH AND JLCXX_LIB_PATH MATCHES "\\.dll\\.a$")
+    message(FATAL_ERROR
+      "Julia bindings build error: Incompatible CxxWrap library format detected.\n"
+      "The found libcxxwrap_julia library (${JLCXX_LIB_PATH}) is a MinGW import library (.dll.a), "
+      "but Z3 is being built with MSVC which requires .lib format.\n\n"
+      "Solutions:\n"
+      "1. Use MinGW/GCC instead of MSVC to build Z3\n"
+      "2. Install an MSVC-compatible version of CxxWrap\n"
+      "3. Disable Julia bindings with -DZ3_BUILD_JULIA_BINDINGS=OFF\n\n"
+      "For more information, see: https://github.com/JuliaInterop/CxxWrap.jl#compiling-the-c-code")
+  endif()
+endif()
+
 add_library(z3jl SHARED z3jl.cpp)
 target_link_libraries(z3jl PRIVATE JlCxx::cxxwrap_julia libz3)
 target_include_directories(z3jl PRIVATE
diff --git a/src/api/python/setup.py b/src/api/python/setup.py
index 6b400c7d4..39ae7df72 100644
--- a/src/api/python/setup.py
+++ b/src/api/python/setup.py
@@ -113,14 +113,21 @@ def _clean_native_build():
 
 def _z3_version():
     post = os.getenv('Z3_VERSION_SUFFIX', '')
+    print("z3_version", "release dir", RELEASE_DIR)
     if RELEASE_DIR is None:
-        fn = os.path.join(SRC_DIR_REPO, 'VERSION.txt')
-        if os.path.exists(fn):
-            with open(fn) as f:
-                for line in f:
-                    n = re.match(r"(.*)\.(.*)\.(.*)\.(.*)", line)
-                    if not n is None:
-                        return n.group(1) + '.' + n.group(2) + '.' + n.group(3) + '.' + n.group(4) + post
+        dirs = [SRC_DIR, ROOT_DIR, SRC_DIR_REPO, SRC_DIR_LOCAL, os.path.join(ROOT_DIR, '..', '..')]
+        for d in dirs:
+            if os.path.exists(d):
+                print(d, ": ", os.listdir(d))
+        fns = [os.path.join(d, 'scripts', 'VERSION.txt') for d in dirs]
+        for fn in fns:
+            print("loading version file", fn, "exists", os.path.exists(fn))
+            if os.path.exists(fn):
+                with open(fn) as f:
+                    for line in f:
+                        n = re.match(r"(.*)\.(.*)\.(.*)\.(.*)", line)
+                        if not n is None:
+                            return n.group(1) + '.' + n.group(2) + '.' + n.group(3) + '.' + n.group(4) + post
         return "?.?.?.?"
     else:
         version = RELEASE_METADATA[0]
diff --git a/src/params/smt_parallel_params.pyg b/src/params/smt_parallel_params.pyg
index 53892b066..dde9efd4d 100644
--- a/src/params/smt_parallel_params.pyg
+++ b/src/params/smt_parallel_params.pyg
@@ -11,4 +11,7 @@ def_module_params('smt_parallel',
         ('share_units_initial_only', BOOL, False, 'share only initial Boolean atoms as units'),
         ('cube_initial_only', BOOL, False, 'cube only on initial Boolean atoms'),
         ('max_cube_size', UINT, 20, 'maximum size of a cube to share'),
+        ('max_greedy_cubes', UINT, 1000, 'maximum number of cubes to greedily share before switching to frugal'),
+        ('num_split_lits', UINT, 2, 'how many literals, k, we split on to create 2^k cubes'),
+        ('frugal_deepest_cube_only', BOOL, False, 'only apply the frugal cube strategy, and only on a deepest (largest) cube from the batch manager'),
     ))
diff --git a/src/smt/smt_context.h b/src/smt/smt_context.h
index 63316a331..8533f0d50 100644
--- a/src/smt/smt_context.h
+++ b/src/smt/smt_context.h
@@ -191,7 +191,7 @@ namespace smt {
         unsigned_vector          m_lit_occs;     //!< occurrence count of literals
         svector<bool_var_data>   m_bdata;        //!< mapping bool_var -> data
         svector<double>          m_activity;
-        updatable_priority_queue::priority_queue m_pq_scores;
+        // updatable_priority_queue::priority_queue m_pq_scores;
 
         struct lit_node : dll_base<lit_node> {
             literal lit;
         };
         lit_node*                m_dll_lits;
 
-        // svector> m_lit_scores;
         svector<double>          m_lit_scores[2];
 
         clause_vector            m_aux_clauses;
@@ -952,7 +951,7 @@ namespace smt {
                 e = 0;
             for (auto& e : m_lit_scores[1])
                 e = 0;
-            m_pq_scores.clear(); // Clear the priority queue heap as well
+            // m_pq_scores.clear(); // Clear the priority queue heap as well
         }
 
         double get_score(literal l) const {
             return m_lit_scores[l.sign()][l.var()];
diff --git a/src/smt/smt_internalizer.cpp b/src/smt/smt_internalizer.cpp
index c7e257fac..1438a41f1 100644
--- a/src/smt/smt_internalizer.cpp
+++ b/src/smt/smt_internalizer.cpp
@@ -1532,14 +1532,6 @@ namespace smt {
         }}
     }
 
-    // void context::add_scores(unsigned n, literal const* lits) {
-    //     for (unsigned i = 0; i < n; ++i) {
-    //         auto lit = lits[i];
-    //         unsigned v = lit.var();
-    //         m_lit_scores[v][lit.sign()] += 1.0 / n;
-    //     }
-    // }
-
     void context::add_scores(unsigned n, literal const* lits) {
         for (unsigned i = 0; i < n; ++i) {
             auto lit = lits[i];
             unsigned v = lit.var();
             m_lit_scores[lit.sign()][v] += 1.0 / n;
 
-            auto new_score = m_lit_scores[0][v] * m_lit_scores[1][v];
-            m_pq_scores.set(v, new_score);
+            // auto new_score = m_lit_scores[0][v] * m_lit_scores[1][v];
+            // m_pq_scores.set(v, new_score);
         }
     }
 
diff --git a/src/smt/smt_parallel.cpp b/src/smt/smt_parallel.cpp
index 47c1a5fd2..650464c68 100644
--- a/src/smt/smt_parallel.cpp
+++ b/src/smt/smt_parallel.cpp
@@ -137,6 +137,8 @@ namespace smt {
         m_config.m_share_units_initial_only = pp.share_units_initial_only();
         m_config.m_cube_initial_only = pp.cube_initial_only();
         m_config.m_max_conflict_mul = pp.max_conflict_mul();
+        m_config.m_max_greedy_cubes = pp.max_greedy_cubes();
+        m_config.m_num_split_lits = pp.num_split_lits();
 
         // don't share initial units
         ctx->pop_to_base_lvl();
@@ -272,20 +274,37 @@ namespace smt {
 
     void parallel::batch_manager::get_cubes(ast_translation& g2l, vector<expr_ref_vector>& cubes) {
         std::scoped_lock lock(mux);
-        if (m_cubes.size() == 1 && m_cubes[0].size() == 0) {
+        if (m_cubes.size() == 1 && m_cubes[0].empty()) {
             // special initialization: the first cube is empty, have the worker work on an empty cube.
             cubes.push_back(expr_ref_vector(g2l.to()));
             return;
         }
 
         for (unsigned i = 0; i < std::min(m_max_batch_size / p.num_threads, (unsigned)m_cubes.size()) && !m_cubes.empty(); ++i) {
-            auto& cube = m_cubes.back();
-            expr_ref_vector l_cube(g2l.to());
-            for (auto& e : cube) {
-                l_cube.push_back(g2l(e));
+            if (m_config.m_frugal_deepest_cube_only) {
+                // get the deepest set of cubes
+                auto& deepest_cubes = m_cubes_depth_sets.rbegin()->second;
+                unsigned idx = rand() % deepest_cubes.size();
+                auto& cube = deepest_cubes[idx]; // get a random cube from the deepest set
+
+                expr_ref_vector l_cube(g2l.to());
+                for (auto& e : cube) {
+                    l_cube.push_back(g2l(e));
+                }
+                cubes.push_back(l_cube);
+
+                deepest_cubes.erase(deepest_cubes.begin() + idx); // remove the cube from the set
+                if (deepest_cubes.empty())
+                    m_cubes_depth_sets.erase(std::prev(m_cubes_depth_sets.end())); // drop the now-empty deepest depth set
+            } else {
+                auto& cube = m_cubes.back();
+                expr_ref_vector l_cube(g2l.to());
+                for (auto& e : cube) {
+                    l_cube.push_back(g2l(e));
+                }
+                cubes.push_back(l_cube);
+                m_cubes.pop_back();
             }
-            cubes.push_back(l_cube);
-            m_cubes.pop_back();
         }
     }
 
@@ -341,7 +360,7 @@ namespace smt {
             return l_undef; // the main context was cancelled, so we return undef.
         switch (m_state) {
         case state::is_running: // batch manager is still running, but all threads have processed their cubes, which means all cubes were unsat
-            if (!m_cubes.empty())
+            if (!m_cubes.empty() || (m_config.m_frugal_deepest_cube_only && !m_cubes_depth_sets.empty()))
                 throw default_exception("inconsistent end state");
             if (!p.m_assumptions_used.empty()) {
                 // collect unsat core from assumptions used, if any --> case when all cubes were unsat, but depend on nonempty asms, so we need to add these asms to final unsat core
@@ -439,9 +458,36 @@ namespace smt {
             }
         };
 
+        // apply the frugal strategy to ALL incoming worker cubes, but save them in the deepest-cube data structure
+        auto add_split_atom_deepest_cubes = [&](expr* atom) {
+            for (auto& c : C_worker) {
+                expr_ref_vector g_cube(l2g.to());
+                for (auto& a : c)
+                    g_cube.push_back(l2g(a));
+                if (g_cube.size() >= m_config.m_max_cube_size) // we already added this before!! we just need to add the splits now
+                    continue;
+
+                // add depth set d+1 if it doesn't exist yet
+                unsigned d = g_cube.size();
+                if (m_cubes_depth_sets.find(d + 1) == m_cubes_depth_sets.end())
+                    m_cubes_depth_sets[d + 1] = vector<expr_ref_vector>();
+
+                // split on the negative atom
+                m_cubes_depth_sets[d + 1].push_back(g_cube);
+                m_cubes_depth_sets[d + 1].back().push_back(m.mk_not(atom));
+
+                // need to split on the positive atom too
+                g_cube.push_back(atom);
+                m_cubes_depth_sets[d + 1].push_back(g_cube);
+
+                m_stats.m_num_cubes += 2;
+                m_stats.m_max_cube_size = std::max(m_stats.m_max_cube_size, d + 1);
+            }
+        };
+
         std::scoped_lock lock(mux);
-        unsigned max_cubes = 1000;
-        bool greedy_mode = (m_cubes.size() <= max_cubes) && !m_config.m_frugal_cube_only;
+        unsigned max_greedy_cubes = 1000;
+        bool greedy_mode = (m_cubes.size() <= max_greedy_cubes) && !m_config.m_frugal_cube_only && !m_config.m_frugal_deepest_cube_only;
         unsigned a_worker_start_idx = 0;
 
         //
@@ -455,7 +501,7 @@ namespace smt {
                 m_split_atoms.push_back(g_atom);
                 add_split_atom(g_atom, 0); // split all *existing* cubes
-                if (m_cubes.size() > max_cubes) {
+                if (m_cubes.size() > max_greedy_cubes) {
                     greedy_mode = false;
                     ++a_worker_start_idx; // start frugal from here
                     break;
@@ -470,16 +516,26 @@
             expr_ref_vector g_cube(l2g.to());
             for (auto& atom : c)
                 g_cube.push_back(l2g(atom));
             unsigned start = m_cubes.size(); // update start after adding each cube so we only process the current cube being added
-            m_cubes.push_back(g_cube);
+
+            if (m_config.m_frugal_deepest_cube_only) {
+                // need to add the depth set if it doesn't exist yet
+                if (m_cubes_depth_sets.find(g_cube.size()) == m_cubes_depth_sets.end()) {
+                    m_cubes_depth_sets[g_cube.size()] = vector<expr_ref_vector>();
+                }
+                m_cubes_depth_sets[g_cube.size()].push_back(g_cube);
+                m_stats.m_num_cubes += 1;
+                m_stats.m_max_cube_size = std::max(m_stats.m_max_cube_size, g_cube.size());
+            } else {
+                m_cubes.push_back(g_cube);
+            }
 
             if (greedy_mode) {
                 // Split new cube on all existing m_split_atoms not in it
                 for (auto g_atom : m_split_atoms) {
                     if (!atom_in_cube(g_cube, g_atom)) {
                         add_split_atom(g_atom, start);
-                        if (m_cubes.size() > max_cubes) {
+                        if (m_cubes.size() > max_greedy_cubes) {
                             greedy_mode = false;
                             break;
                         }
@@ -494,53 +550,57 @@ namespace smt {
                 expr_ref g_atom(l2g(A_worker[i]), l2g.to());
                 if (!m_split_atoms.contains(g_atom))
                     m_split_atoms.push_back(g_atom);
-                add_split_atom(g_atom, initial_m_cubes_size);
+                if (m_config.m_frugal_deepest_cube_only) {
+                    add_split_atom_deepest_cubes(g_atom);
+                } else {
+                    add_split_atom(g_atom, initial_m_cubes_size);
+                }
             }
         }
     }
 
     expr_ref_vector parallel::worker::get_split_atoms() {
-#if false
+        unsigned k = 2;
+
+        // auto candidates = ctx->m_pq_scores.get_heap();
+        auto candidates = ctx->m_lit_scores;
+        std::vector<std::pair<double, expr*>> top_k; // will hold at most k elements
+
         for (bool_var v = 0; v < ctx->get_num_bool_vars(); ++v) {
             if (ctx->get_assignment(v) != l_undef)
                 continue;
             expr* e = ctx->bool_var2expr(v);
             if (!e)
                 continue;
-            auto v_score = ctx->m_lit_scores[0][v] * ctx->m_lit_scores[1][v];
-            // if v_score is maximal then v is our split atom..
+            double score = ctx->m_lit_scores[0][v] * ctx->m_lit_scores[1][v];
+
+            // decay the scores
             ctx->m_lit_scores[0][v] /= 2;
             ctx->m_lit_scores[1][v] /= 2;
-        }
-#endif
-        unsigned k = 2;
-        auto candidates = ctx->m_pq_scores.get_heap();
-
-        std::sort(candidates.begin(), candidates.end(),
-                  [](const auto& a, const auto& b) { return a.priority > b.priority; });
+
+            // insert into top_k; linear scan since k is very small
+            if (top_k.size() < k) {
+                top_k.push_back({score, e});
+            } else {
+                // find the smallest in top_k and replace it if we found a new min
+                size_t min_idx = 0;
+                for (size_t i = 1; i < k; ++i)
+                    if (top_k[i].first < top_k[min_idx].first)
+                        min_idx = i;
+
+                if (score > top_k[min_idx].first) {
+                    top_k[min_idx] = {score, e};
+                }
+            }
+        }
 
         expr_ref_vector top_lits(m);
-        for (const auto& node : candidates) {
-
-            if (ctx->get_assignment(node.key) != l_undef)
-                continue;
-
-            if (m_config.m_cube_initial_only && node.key >= m_num_initial_atoms) {
-                LOG_WORKER(2, " Skipping non-initial atom from cube: " << node.key << "\n");
-                continue; // skip non-initial atoms if configured to do so
-            }
-
-            expr* e = ctx->bool_var2expr(node.key);
-            if (!e)
-                continue;
-
-            top_lits.push_back(expr_ref(e, m));
-            if (top_lits.size() >= k)
-                break;
-        }
-        IF_VERBOSE(3, verbose_stream() << "top literals " << top_lits << " head size " << ctx->m_pq_scores.size() << " num vars " << ctx->get_num_bool_vars() << "\n");
+        for (auto& p : top_k)
+            top_lits.push_back(expr_ref(p.second, m));
+
+        IF_VERBOSE(3, verbose_stream() << "top literals " << top_lits << " head size " << ctx->m_lit_scores[0].size() << " num vars " << ctx->get_num_bool_vars() << "\n");
+
         return top_lits;
     }
 
@@ -548,11 +608,17 @@ namespace smt {
         m_state = state::is_running;
         m_cubes.reset();
         m_cubes.push_back(expr_ref_vector(m)); // push empty cube
+
+        if (m_config.m_frugal_deepest_cube_only) {
+            m_cubes_depth_sets.clear();
+        }
+
         m_split_atoms.reset();
         smt_parallel_params sp(p.ctx.m_params);
         m_config.m_max_cube_size = sp.max_cube_size();
         m_config.m_frugal_cube_only = sp.frugal_cube_only();
         m_config.m_never_cube = sp.never_cube();
+        m_config.m_frugal_deepest_cube_only = sp.frugal_deepest_cube_only();
     }
 
     void parallel::batch_manager::collect_statistics(::statistics& st) const {
diff --git a/src/smt/smt_parallel.h b/src/smt/smt_parallel.h
index dd9a15c82..ae084a65e 100644
--- a/src/smt/smt_parallel.h
+++ b/src/smt/smt_parallel.h
@@ -20,6 +20,7 @@ Revision History:
 
 #include "smt/smt_context.h"
 #include <thread>
+#include <map>
 
 namespace smt {
 
@@ -49,6 +50,7 @@ namespace smt {
             unsigned m_max_cube_size = 20;
             bool m_frugal_cube_only = false;
             bool m_never_cube = false;
+            bool m_frugal_deepest_cube_only = false;
         };
         struct stats {
             unsigned m_max_cube_size = 0;
@@ -63,6 +65,7 @@ namespace smt {
         stats m_stats;
         expr_ref_vector m_split_atoms; // atoms to split on
         vector<expr_ref_vector> m_cubes;
+        std::map<unsigned, vector<expr_ref_vector>> m_cubes_depth_sets; // sets of cubes; the key is the depth (size) of the cubes in the set
         unsigned m_max_batch_size = 10;
         unsigned m_exception_code = 0;
         std::string m_exception_msg;
@@ -120,6 +123,8 @@ namespace smt {
             double m_max_conflict_mul = 1.5;
             bool m_share_units_initial_only = false;
             bool m_cube_initial_only = false;
+            unsigned m_max_greedy_cubes = 1000;
+            unsigned m_num_split_lits = 2;
         };
 
         unsigned id; // unique identifier for the worker
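
Reviewer note (illustration, not part of the diff): the idea behind `m_cubes_depth_sets` and the `smt_parallel.frugal_deepest_cube_only` knob is a depth-bucketed cube store. Cubes are grouped by their size, workers always draw a random cube from the deepest non-empty bucket, and a bucket is dropped once it empties; splits on a new atom push the positive and negative extensions of a size-`d` cube into the `d + 1` bucket. The sketch below is a minimal standalone model of that selection policy; the `Cube` and `CubeStore` names are hypothetical stand-ins for `expr_ref_vector` and the batch manager's `std::map<unsigned, vector<expr_ref_vector>>`, and it omits the mutex and AST translation the real code performs.

```cpp
// Minimal sketch of the deepest-cube-first selection policy (assumed names).
#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <map>
#include <string>
#include <vector>

using Cube = std::vector<std::string>;                 // a cube = conjunction of literals

class CubeStore {
    // key: cube size ("depth"); value: all pending cubes of that size
    std::map<unsigned, std::vector<Cube>> m_by_depth;
public:
    void add(Cube c) {
        m_by_depth[static_cast<unsigned>(c.size())].push_back(std::move(c));
    }
    bool empty() const { return m_by_depth.empty(); }
    // Pop a random cube from the deepest (largest-key) bucket; drop the bucket
    // once it is empty -- mirroring get_cubes() in frugal_deepest_cube_only mode.
    Cube pop_deepest() {
        auto deepest = std::prev(m_by_depth.end());
        auto& bucket = deepest->second;
        std::size_t idx = static_cast<std::size_t>(std::rand()) % bucket.size();
        Cube c = std::move(bucket[idx]);
        bucket.erase(bucket.begin() + static_cast<std::ptrdiff_t>(idx));
        if (bucket.empty())
            m_by_depth.erase(deepest);
        return c;
    }
};

int main() {
    CubeStore store;
    store.add({"p"});
    store.add({"p", "q"});
    store.add({"p", "!q", "r"});
    while (!store.empty())
        std::cout << "popped cube of size " << store.pop_deepest().size() << "\n"; // 3, then 2, then 1
    return 0;
}
```

The design choice this models: always working on the largest (most constrained) cubes first keeps workers on subproblems that are likely to resolve quickly, while the per-depth buckets make "deepest available cube" an O(log n) map lookup rather than a scan of `m_cubes`.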