def_module_params('smt_parallel',
    export=True,
    description='Experimental parameters for parallel solving',
    params=(
        ('share_units', BOOL, True, 'share units'),
        ('share_conflicts', BOOL, True, 'share conflicts'),
        ('never_cube', BOOL, False, 'never cube'),
        ('frugal_cube_only', BOOL, False, 'only apply frugal cube strategy'),
        ('relevant_units_only', BOOL, True, 'only share relevant units'),
        ('max_conflict_mul', DOUBLE, 1.5, 'increment multiplier for max-conflicts'),
        ('share_units_initial_only', BOOL, True, 'share only initial Boolean atoms as units'),
        ('cube_initial_only', BOOL, False, 'cube only on initial Boolean atoms'),
        ('max_cube_depth', UINT, 20, 'maximum depth (size) of a cube to share'),
        ('max_greedy_cubes', UINT, 1000, 'maximum number of cubes to greedily share before switching to frugal'),
        ('num_split_lits', UINT, 2, 'number of literals k to split on, creating 2^k cubes'),
        ('depth_splitting_only', BOOL, False, 'only apply frugal cube strategy, and only on the deepest (largest) cubes from the batch manager'),
        ('backbone_detection', BOOL, False, 'apply backbone literal heuristic'),
        ('iterative_deepening', BOOL, False, 'deepen cubes based on an iterative hardness-cutoff heuristic'),
        ('beam_search', BOOL, False, 'use beam search with a priority queue to rank cubes given to threads'),
        ('explicit_hardness', BOOL, False, 'use explicit hardness metric for cubes'),
        ('cubetree', BOOL, False, 'use cube tree data structure for storing cubes'),
        ('searchtree', BOOL, False, 'use search tree implementation (parallel2)'),
        ('inprocessing', BOOL, False, 'integrate in-processing as a heuristic simplification'),
        ('inprocessing_delay', UINT, 0, 'number of undef results before invoking simplification')
    ))
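
# A minimal sketch of how parameters from this module could be set through the
# Z3 Python API, assuming the experimental 'smt_parallel' module is compiled into
# the build; the module and parameter names below are taken from the definition
# above, but their availability in a given release is an assumption.
#
#   from z3 import Int, Solver, set_param
#
#   # Global module parameters are addressed as '<module>.<name>'.
#   set_param('smt_parallel.share_units', True)
#   set_param('smt_parallel.max_cube_depth', 20)
#   set_param('smt_parallel.num_split_lits', 2)   # split on k literals -> 2^k cubes
#
#   x, y = Int('x'), Int('y')
#   s = Solver()
#   s.add(x > 0, y > x, x + y < 100)
#   print(s.check())   # result is unchanged; the settings only affect parallel search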