3
0
Fork 0
mirror of https://github.com/Z3Prover/z3 synced 2025-04-08 10:25:18 +00:00

import csp progress

Signed-off-by: Nikolaj Bjorner <nbjorner@microsoft.com>
This commit is contained in:
Nikolaj Bjorner 2019-02-14 17:09:18 -08:00
parent 45aa8dd39a
commit e4c6dcd84c
3 changed files with 83 additions and 37 deletions

View file

@ -148,7 +148,7 @@ public:
bool is_resource(expr* e, unsigned& r);
bool is_makespan(expr* e, unsigned& r);
bool is_add_resource_available(expr * e, expr *& res, unsigned& loadpct, unsigned& cap_time, uint64_t& start, uint64_t& end, svector<symbol>& properites);
bool is_add_job_resource(expr * e, expr *& job, expr*& res, unsigned& loadpct, uint64_t& capacity, uint64_t& end, svector<symbol>& properites);
bool is_add_job_resource(expr * e, expr *& job, expr*& res, unsigned& loadpct, uint64_t& capacity, uint64_t& finite_capacity_end, svector<symbol>& properites);
bool is_set_preemptable(expr* e, expr *& job);
bool is_model(expr* e) const { return is_app_of(e, m_fid, OP_JS_MODEL); }
bool is_js_properties(expr* e, svector<symbol>& properties);

View file

@ -305,7 +305,6 @@ namespace smt {
literal end_ge_lo = mk_ge(ji.m_end, clb);
// Initialization ensures that satisfiable states have completion time below end.
VERIFY(clb <= get_job_resource(j, r).m_end);
region& r = ctx.get_region();
ctx.assign(end_ge_lo,
ctx.mk_justification(
@ -321,6 +320,10 @@ namespace smt {
*/
bool theory_jobscheduler::constrain_end_time_interval(unsigned j, unsigned r) {
unsigned idx1 = 0, idx2 = 0;
if (!job_has_resource(j, r)) {
IF_VERBOSE(0, verbose_stream() << "job " << j << " assigned non-registered resource " << r << "\n");
return false;
}
time_t s = start(j);
job_resource const& jr = get_job_resource(j, r);
TRACE("csp", tout << "job: " << j << " resource: " << r << " start: " << s << "\n";);
@ -451,6 +454,7 @@ namespace smt {
job_info const& ji = m_jobs[j];
VERIFY(u.is_resource(ji.m_job2resource->get_root()->get_owner(), r));
TRACE("csp", tout << "job: " << j << " resource: " << r << "\n";);
std::cout << j << " -o " << r << "\n";
propagate_job2resource(j, r);
}
}
@ -458,8 +462,13 @@ namespace smt {
void theory_jobscheduler::propagate_job2resource(unsigned j, unsigned r) {
job_info const& ji = m_jobs[j];
res_info const& ri = m_resources[r];
job_resource const& jr = get_job_resource(j, r);
literal eq = mk_eq_lit(ji.m_job2resource, ri.m_resource);
if (!job_has_resource(j, r)) {
IF_VERBOSE(0, verbose_stream() << "job " << j << " assigned non-registered resource " << r << "\n");
return;
}
return;
job_resource const& jr = get_job_resource(j, r);
assert_last_end_time(j, r, jr, eq);
assert_last_start_time(j, r, eq);
assert_first_start_time(j, r, eq);
@ -489,7 +498,7 @@ namespace smt {
}
std::ostream& theory_jobscheduler::display(std::ostream & out, job_resource const& jr) const {
return out << "r:" << jr.m_resource_id << " cap:" << jr.m_capacity << " load:" << jr.m_loadpct << " end:" << jr.m_end;
return out << "r:" << jr.m_resource_id << " cap:" << jr.m_capacity << " load:" << jr.m_loadpct << " end:" << jr.m_finite_capacity_end;
for (auto const& s : jr.m_properties) out << " " << s; out << "\n";
}
@ -620,21 +629,30 @@ namespace smt {
}
void theory_jobscheduler::set_preemptable(unsigned j, bool is_preemptable) {
m_jobs.reserve(j + 1);
m_jobs[j].m_is_preemptable = is_preemptable;
ensure_job(j).m_is_preemptable = is_preemptable;
}
void theory_jobscheduler::add_job_resource(unsigned j, unsigned r, unsigned loadpct, uint64_t cap, time_t end, properties const& ps) {
SASSERT(get_context().at_base_level());
SASSERT(0 <= loadpct && loadpct <= 100);
SASSERT(0 <= cap);
m_jobs.reserve(j + 1);
m_resources.reserve(r + 1);
job_info& ji = m_jobs[j];
if (ji.m_resource2index.contains(r)) {
throw default_exception("resource already bound to job");
theory_jobscheduler::res_info& theory_jobscheduler::ensure_resource(unsigned last) {
while (m_resources.size() <= last) {
unsigned r = m_resources.size();
m_resources.push_back(res_info());
res_info& ri = m_resources.back();
context& ctx = get_context();
app_ref res(u.mk_resource(r), m);
if (!ctx.e_internalized(res)) ctx.internalize(res, false);
ri.m_resource = ctx.get_enode(res);
app_ref ms(u.mk_makespan(r), m);
if (!ctx.e_internalized(ms)) ctx.internalize(ms, false);
ri.m_makespan = ctx.get_enode(ms);
}
if (!ji.m_start) {
return m_resources[last];
}
theory_jobscheduler::job_info& theory_jobscheduler::ensure_job(unsigned last) {
while (m_jobs.size() <= last) {
unsigned j = m_jobs.size();
m_jobs.push_back(job_info());
job_info& ji = m_jobs.back();
context& ctx = get_context();
app_ref job(u.mk_job(j), m);
app_ref start(u.mk_start(j), m);
@ -649,10 +667,22 @@ namespace smt {
ji.m_end = ctx.get_enode(end);
ji.m_job2resource = ctx.get_enode(res);
}
return m_jobs[last];
}
void theory_jobscheduler::add_job_resource(unsigned j, unsigned r, unsigned loadpct, uint64_t cap, time_t finite_capacity_end, properties const& ps) {
SASSERT(get_context().at_base_level());
SASSERT(0 <= loadpct && loadpct <= 100);
SASSERT(0 <= cap);
job_info& ji = ensure_job(j);
res_info& ri = ensure_resource(r);
if (ji.m_resource2index.contains(r)) {
throw default_exception("resource already bound to job");
}
ji.m_resource2index.insert(r, ji.m_resources.size());
ji.m_resources.push_back(job_resource(r, cap, loadpct, end, ps));
SASSERT(!m_resources[r].m_jobs.contains(j));
m_resources[r].m_jobs.push_back(j);
ji.m_resources.push_back(job_resource(r, cap, loadpct, finite_capacity_end, ps));
SASSERT(!ri.m_jobs.contains(j));
ri.m_jobs.push_back(j);
}
@ -660,17 +690,7 @@ namespace smt {
SASSERT(get_context().at_base_level());
SASSERT(1 <= max_loadpct && max_loadpct <= 100);
SASSERT(start <= end);
m_resources.reserve(r + 1);
res_info& ri = m_resources[r];
if (!ri.m_resource) {
context& ctx = get_context();
app_ref res(u.mk_resource(r), m);
if (!ctx.e_internalized(res)) ctx.internalize(res, false);
ri.m_resource = ctx.get_enode(res);
app_ref ms(u.mk_makespan(r), m);
if (!ctx.e_internalized(ms)) ctx.internalize(ms, false);
ri.m_makespan = ctx.get_enode(ms);
}
res_info& ri = ensure_resource(r);
ri.m_available.push_back(res_available(max_loadpct, start, end, ps));
}
@ -718,22 +738,37 @@ namespace smt {
for (job_resource const& jr : ji.m_resources) {
unsigned r = jr.m_resource_id;
res_info const& ri = m_resources[r];
if (ri.m_available.empty()) continue;
if (ri.m_available.empty()) {
if (jr.m_capacity == 0) {
start_lb = 0;
end_ub = std::numeric_limits<time_t>::max();
runtime_lb = 0;
}
continue;
}
unsigned idx = 0;
if (first_available(jr, ri, idx)) {
start_lb = std::min(start_lb, ri.m_available[idx].m_start);
}
else {
IF_VERBOSE(0, verbose_stream() << "not first-available\n";);
}
idx = ri.m_available.size();
if (last_available(jr, ri, idx)) {
end_ub = std::max(end_ub, ri.m_available[idx].m_end);
}
else {
IF_VERBOSE(0, verbose_stream() << "not last-available\n";);
}
runtime_lb = std::min(runtime_lb, jr.m_capacity);
// TBD: more accurate estimates for runtime_lb based on gaps
// TBD: correct estimate of runtime_ub taking gaps into account.
}
CTRACE("csp", (start_lb > end_ub), tout << "there is no associated resource working time\n";);
if (start_lb > end_ub) {
IF_VERBOSE(0, verbose_stream() << start_lb << " " << end_ub << "\n");
warning_msg("Job %d has no associated resource working time", job_id);
continue;
}
// start(j) >= start_lb
@ -755,9 +790,11 @@ namespace smt {
// resource(j) = r => end(j) <= end(j, r)
void theory_jobscheduler::assert_last_end_time(unsigned j, unsigned r, job_resource const& jr, literal eq) {
#if 0
job_info const& ji = m_jobs[j];
literal l2 = mk_le(ji.m_end, jr.m_end);
literal l2 = mk_le(ji.m_end, jr.m_finite_capacity_end);
get_context().mk_th_axiom(get_id(), ~eq, l2);
#endif
}
// resource(j) = r => start(j) <= lst(j, r, end(j, r))
@ -880,6 +917,10 @@ namespace smt {
}
}
// Returns true iff resource r has been registered for job j, i.e. job j's
// resource2index map contains an entry for r (populated by add_job_resource).
// Used as a guard before get_job_resource, which would index out of bounds
// for an unregistered (j, r) pair.
bool theory_jobscheduler::job_has_resource(unsigned j, unsigned r) const {
return m_jobs[j].m_resource2index.contains(r);
}
theory_jobscheduler::job_resource const& theory_jobscheduler::get_job_resource(unsigned j, unsigned r) const {
job_info const& ji = m_jobs[j];
return ji.m_resources[ji.m_resource2index[r]];
@ -1031,7 +1072,9 @@ namespace smt {
bool theory_jobscheduler::resource_available(job_resource const& jr, res_available const& ra) const {
auto const& jps = jr.m_properties;
auto const& rps = ra.m_properties;
if (jps.size() > rps.size()) return false;
if (jps.size() > rps.size()) {
return false;
}
unsigned j = 0, i = 0;
for (; i < jps.size() && j < rps.size(); ) {
if (jps[i] == rps[j]) {

View file

@ -38,10 +38,10 @@ namespace smt {
unsigned m_resource_id; // id of resource
time_t m_capacity; // amount of resource to use
unsigned m_loadpct; // assuming loadpct
time_t m_end; // must run before
time_t m_finite_capacity_end;
properties m_properties;
job_resource(unsigned r, time_t cap, unsigned loadpct, time_t end, properties const& ps):
m_resource_id(r), m_capacity(cap), m_loadpct(loadpct), m_end(end), m_properties(ps) {}
m_resource_id(r), m_capacity(cap), m_loadpct(loadpct), m_finite_capacity_end(end), m_properties(ps) {}
};
struct job_time {
@ -89,10 +89,9 @@ namespace smt {
struct res_info {
unsigned_vector m_jobs; // jobs allocated to run on resource
vector<res_available> m_available; // time intervals where resource is available
time_t m_end; // can't run after
enode* m_resource;
enode* m_makespan;
res_info(): m_end(std::numeric_limits<time_t>::max()), m_resource(nullptr), m_makespan(nullptr) {}
res_info(): m_resource(nullptr), m_makespan(nullptr) {}
};
ast_manager& m;
@ -152,6 +151,9 @@ namespace smt {
theory * mk_fresh(context * new_ctx) override;
res_info& ensure_resource(unsigned r);
job_info& ensure_job(unsigned j);
public:
// set up job/resource global constraints
void set_preemptable(unsigned j, bool is_preemptable);
@ -189,6 +191,7 @@ namespace smt {
time_t capacity_used(unsigned j, unsigned r, time_t start, time_t end); // capacity used between start and end
job_resource const& get_job_resource(unsigned j, unsigned r) const;
bool job_has_resource(unsigned j, unsigned r) const;
// propagation
void propagate_end_time(unsigned j, unsigned r);