mirror of https://github.com/Z3Prover/z3
synced 2026-02-16 22:01:44 +00:00

Merge branch 'master' of https://github.com/Z3Prover/z3
Commit e099c74985
11 changed files with 114 additions and 956 deletions

131  .github/workflows/a3-python-v2.lock.yml  (generated, vendored)
@ -13,17 +13,17 @@
|
|||
# \ /\ / (_) | | | | ( | | | | (_) \ V V /\__ \
|
||||
# \/ \/ \___/|_| |_|\_\|_| |_|\___/ \_/\_/ |___/
|
||||
#
|
||||
# This file was automatically generated by gh-aw (v0.43.15). DO NOT EDIT.
|
||||
# This file was automatically generated by gh-aw (v0.45.0). DO NOT EDIT.
|
||||
#
|
||||
# To update this file, edit z3prover/z3/a3/a3-python-v2.md@a91c5c58bd975f336bf5b744885ffd4b36b2d2ec and run:
|
||||
# To update this file, edit the corresponding .md file and run:
|
||||
# gh aw compile
|
||||
# For more information: https://github.com/github/gh-aw/blob/main/.github/aw/github-agentic-workflows.md
|
||||
# Not all edits will cause changes to this file.
|
||||
#
|
||||
# For more information: https://github.github.com/gh-aw/introduction/overview/
|
||||
#
|
||||
# Analyzes Python code using a3-python tool to identify bugs and issues
|
||||
#
|
||||
# Source: z3prover/z3/a3/a3-python-v2.md@a91c5c58bd975f336bf5b744885ffd4b36b2d2ec
|
||||
#
|
||||
# frontmatter-hash: d9a6c5f3f2e813814d61c506eb99afad90a7c9abfebc0efc3c6e656ea818e45e
|
||||
# frontmatter-hash: 5cf7ccf6678127541919747c71a65386e7e5be18f41536192360a52b167f4bab
|
||||
|
||||
name: "A3 Python Code Analysis"
|
||||
"on":
|
||||
|
|
@ -48,7 +48,7 @@ jobs:
|
|||
comment_repo: ""
|
||||
steps:
|
||||
- name: Setup Scripts
|
||||
uses: github/gh-aw/actions/setup@a0e753a02a1b3edc578b5c4c9d5d4eaf81ced5bd # v0.43.15
|
||||
uses: github/gh-aw/actions/setup@v0.45.0
|
||||
with:
|
||||
destination: /opt/gh-aw/actions
|
||||
- name: Check workflow file timestamps
|
||||
|
|
@ -80,6 +80,7 @@ jobs:
|
|||
GH_AW_SAFE_OUTPUTS: /opt/gh-aw/safeoutputs/outputs.jsonl
|
||||
GH_AW_SAFE_OUTPUTS_CONFIG_PATH: /opt/gh-aw/safeoutputs/config.json
|
||||
GH_AW_SAFE_OUTPUTS_TOOLS_PATH: /opt/gh-aw/safeoutputs/tools.json
|
||||
GH_AW_WORKFLOW_ID_SANITIZED: a3pythonv2
|
||||
outputs:
|
||||
checkout_pr_success: ${{ steps.checkout-pr.outputs.checkout_pr_success || 'true' }}
|
||||
has_patch: ${{ steps.collect_output.outputs.has_patch }}
|
||||
|
|
@ -89,7 +90,7 @@ jobs:
|
|||
secret_verification_result: ${{ steps.validate-secret.outputs.verification_result }}
|
||||
steps:
|
||||
- name: Setup Scripts
|
||||
uses: github/gh-aw/actions/setup@a0e753a02a1b3edc578b5c4c9d5d4eaf81ced5bd # v0.43.15
|
||||
uses: github/gh-aw/actions/setup@v0.45.0
|
||||
with:
|
||||
destination: /opt/gh-aw/actions
|
||||
- name: Checkout repository
|
||||
|
|
@ -100,7 +101,8 @@ jobs:
|
|||
run: bash /opt/gh-aw/actions/create_gh_aw_tmp_dir.sh
|
||||
- name: Checkout Python source files
|
||||
run: |-
|
||||
git sparse-checkout add src
|
||||
git sparse-checkout init --cone
|
||||
git sparse-checkout set src
|
||||
echo "Source files checked out for Python analysis"
|
||||
|
||||
- name: Configure Git credentials
|
||||
|
|
@ -140,8 +142,8 @@ jobs:
|
|||
engine_name: "GitHub Copilot CLI",
|
||||
model: process.env.GH_AW_MODEL_AGENT_COPILOT || "",
|
||||
version: "",
|
||||
agent_version: "0.0.407",
|
||||
cli_version: "v0.43.15",
|
||||
agent_version: "0.0.410",
|
||||
cli_version: "v0.45.0",
|
||||
workflow_name: "A3 Python Code Analysis",
|
||||
experimental: false,
|
||||
supports_tools_allowlist: true,
|
||||
|
|
@ -155,10 +157,10 @@ jobs:
|
|||
actor: context.actor,
|
||||
event_name: context.eventName,
|
||||
staged: false,
|
||||
allowed_domains: ["default","python"],
|
||||
allowed_domains: ["defaults","python"],
|
||||
firewall_enabled: true,
|
||||
awf_version: "v0.16.1",
|
||||
awmg_version: "",
|
||||
awf_version: "v0.18.0",
|
||||
awmg_version: "v0.1.4",
|
||||
steps: {
|
||||
firewall: "squid"
|
||||
},
|
||||
|
|
@ -179,21 +181,21 @@ jobs:
|
|||
env:
|
||||
COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
|
||||
- name: Install GitHub Copilot CLI
|
||||
run: /opt/gh-aw/actions/install_copilot_cli.sh 0.0.407
|
||||
run: /opt/gh-aw/actions/install_copilot_cli.sh 0.0.410
|
||||
- name: Install awf binary
|
||||
run: bash /opt/gh-aw/actions/install_awf_binary.sh v0.16.1
|
||||
- name: Determine automatic lockdown mode for GitHub MCP server
|
||||
run: bash /opt/gh-aw/actions/install_awf_binary.sh v0.18.0
|
||||
- name: Determine automatic lockdown mode for GitHub MCP Server
|
||||
id: determine-automatic-lockdown
|
||||
env:
|
||||
TOKEN_CHECK: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }}
|
||||
if: env.TOKEN_CHECK != ''
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
|
||||
env:
|
||||
GH_AW_GITHUB_TOKEN: ${{ secrets.GH_AW_GITHUB_TOKEN }}
|
||||
GH_AW_GITHUB_MCP_SERVER_TOKEN: ${{ secrets.GH_AW_GITHUB_MCP_SERVER_TOKEN }}
|
||||
with:
|
||||
script: |
|
||||
const determineAutomaticLockdown = require('/opt/gh-aw/actions/determine_automatic_lockdown.cjs');
|
||||
await determineAutomaticLockdown(github, context, core);
|
||||
- name: Download container images
|
||||
run: bash /opt/gh-aw/actions/download_docker_images.sh ghcr.io/github/gh-aw-firewall/agent:0.16.1 ghcr.io/github/gh-aw-firewall/squid:0.16.1 ghcr.io/github/gh-aw-mcpg:v0.1.4 ghcr.io/github/github-mcp-server:v0.30.3 ghcr.io/github/serena-mcp-server:latest node:lts-alpine
|
||||
run: bash /opt/gh-aw/actions/download_docker_images.sh ghcr.io/github/gh-aw-firewall/agent:0.18.0 ghcr.io/github/gh-aw-firewall/squid:0.18.0 ghcr.io/github/gh-aw-mcpg:v0.1.4 ghcr.io/github/github-mcp-server:v0.30.3 node:lts-alpine
|
||||
- name: Write Safe Outputs Config
|
||||
run: |
|
||||
mkdir -p /opt/gh-aw/safeoutputs
|
||||
|
|
@ -221,14 +223,15 @@ jobs:
|
|||
"type": "array"
|
||||
},
|
||||
"parent": {
|
||||
"description": "Parent issue number for creating sub-issues. This is the numeric ID from the GitHub URL (e.g., 42 in github.com/owner/repo/issues/42). Can also be a temporary_id (e.g., 'aw_abc123def456') from a previously created issue in the same workflow run.",
|
||||
"description": "Parent issue number for creating sub-issues. This is the numeric ID from the GitHub URL (e.g., 42 in github.com/owner/repo/issues/42). Can also be a temporary_id (e.g., 'aw_abc123', 'aw_Test123') from a previously created issue in the same workflow run.",
|
||||
"type": [
|
||||
"number",
|
||||
"string"
|
||||
]
|
||||
},
|
||||
"temporary_id": {
|
||||
"description": "Unique temporary identifier for referencing this issue before it's created. Format: 'aw_' followed by 12 hex characters (e.g., 'aw_abc123def456'). Use '#aw_ID' in body text to reference other issues by their temporary_id; these are replaced with actual issue numbers after creation.",
|
||||
"description": "Unique temporary identifier for referencing this issue before it's created. Format: 'aw_' followed by 3 to 8 alphanumeric characters (e.g., 'aw_abc1', 'aw_Test123'). Use '#aw_ID' in body text to reference other issues by their temporary_id; these are replaced with actual issue numbers after creation.",
|
||||
"pattern": "^aw_[A-Za-z0-9]{4,8}$",
|
||||
"type": "string"
|
||||
},
|
||||
"title": {
|
||||
|
|
@ -422,7 +425,7 @@ jobs:
|
|||
|
||||
bash /opt/gh-aw/actions/start_safe_outputs_server.sh
|
||||
|
||||
- name: Start MCP gateway
|
||||
- name: Start MCP Gateway
|
||||
id: start-mcp-gateway
|
||||
env:
|
||||
GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
|
||||
|
|
@ -467,14 +470,6 @@ jobs:
|
|||
"headers": {
|
||||
"Authorization": "\${GH_AW_SAFE_OUTPUTS_API_KEY}"
|
||||
}
|
||||
},
|
||||
"serena": {
|
||||
"type": "stdio",
|
||||
"container": "ghcr.io/github/serena-mcp-server:latest",
|
||||
"args": ["--network", "host"],
|
||||
"entrypoint": "serena",
|
||||
"entrypointArgs": ["start-mcp-server", "--context", "codex", "--project", "\${GITHUB_WORKSPACE}"],
|
||||
"mounts": ["\${GITHUB_WORKSPACE}:\${GITHUB_WORKSPACE}:rw"]
|
||||
}
|
||||
},
|
||||
"gateway": {
|
||||
|
|
@ -508,6 +503,7 @@ jobs:
|
|||
cat << 'GH_AW_PROMPT_EOF' > "$GH_AW_PROMPT"
|
||||
<system>
|
||||
GH_AW_PROMPT_EOF
|
||||
cat "/opt/gh-aw/prompts/xpia.md" >> "$GH_AW_PROMPT"
|
||||
cat "/opt/gh-aw/prompts/temp_folder_prompt.md" >> "$GH_AW_PROMPT"
|
||||
cat "/opt/gh-aw/prompts/markdown.md" >> "$GH_AW_PROMPT"
|
||||
cat << 'GH_AW_PROMPT_EOF' >> "$GH_AW_PROMPT"
|
||||
|
|
@ -519,6 +515,19 @@ jobs:
|
|||
<instructions>
|
||||
To create or modify GitHub resources (issues, discussions, pull requests, etc.), you MUST call the appropriate safe output tool. Simply writing content will NOT work - the workflow requires actual tool calls.
|
||||
|
||||
Temporary IDs: Some safe output tools support a temporary ID field (usually named temporary_id) so you can reference newly-created items elsewhere in the SAME agent output (for example, using #aw_abc1 in a later body).
|
||||
|
||||
**IMPORTANT - temporary_id format rules:**
|
||||
- If you DON'T need to reference the item later, OMIT the temporary_id field entirely (it will be auto-generated if needed)
|
||||
- If you DO need cross-references/chaining, you MUST match this EXACT validation regex: /^aw_[A-Za-z0-9]{3,8}$/i
|
||||
- Format: aw_ prefix followed by 3 to 8 alphanumeric characters (A-Z, a-z, 0-9, case-insensitive)
|
||||
- Valid alphanumeric characters: ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789
|
||||
- INVALID examples: aw_ab (too short), aw_123456789 (too long), aw_test-id (contains hyphen), aw_id_123 (contains underscore)
|
||||
- VALID examples: aw_abc, aw_abc1, aw_Test123, aw_A1B2C3D4, aw_12345678
|
||||
- To generate valid IDs: use 3-8 random alphanumeric characters or omit the field to let the system auto-generate
|
||||
|
||||
Do NOT invent other aw_* formats — downstream steps will reject them with validation errors matching against /^aw_[A-Za-z0-9]{3,8}$/i.
|
||||
|
||||
Discover available tools from the safeoutputs MCP server.
|
||||
|
||||
**Critical**: Tool calls write structured data that downstream jobs process. Without tool calls, follow-up actions will be skipped.
|
||||
|
|
@ -619,9 +628,8 @@ jobs:
|
|||
timeout-minutes: 45
|
||||
run: |
|
||||
set -o pipefail
|
||||
sudo -E awf --env-all --container-workdir "${GITHUB_WORKSPACE}" --allow-domains '*.pythonhosted.org,anaconda.org,api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.githubcopilot.com,api.individual.githubcopilot.com,binstar.org,bootstrap.pypa.io,conda.anaconda.org,conda.binstar.org,default,files.pythonhosted.org,github.com,host.docker.internal,pip.pypa.io,pypi.org,pypi.python.org,raw.githubusercontent.com,registry.npmjs.org,repo.anaconda.com,repo.continuum.io,telemetry.enterprise.githubcopilot.com' --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --enable-host-access --image-tag 0.16.1 --skip-pull \
|
||||
-- '/usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --add-dir "${GITHUB_WORKSPACE}" --disable-builtin-mcps --allow-all-tools --allow-all-paths --share /tmp/gh-aw/sandbox/agent/logs/conversation.md --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_COPILOT:+ --model "$GH_AW_MODEL_AGENT_COPILOT"}' \
|
||||
2>&1 | tee /tmp/gh-aw/agent-stdio.log
|
||||
sudo -E awf --env-all --container-workdir "${GITHUB_WORKSPACE}" --allow-domains '*.pythonhosted.org,anaconda.org,api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.githubcopilot.com,api.individual.githubcopilot.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,binstar.org,bootstrap.pypa.io,conda.anaconda.org,conda.binstar.org,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,files.pythonhosted.org,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,pip.pypa.io,ppa.launchpad.net,pypi.org,pypi.python.org,raw.githubusercontent.com,registry.npmjs.org,repo.anaconda.com,repo.continuum.io,s.symcb.com,s.symcd.com,security.ubuntu.com,telemetry.enterprise.githubcopilot.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com' --log-level info --proxy-logs-dir /tmp/gh-aw/sandbox/firewall/logs --enable-host-access --image-tag 0.18.0 --skip-pull \
|
||||
-- /bin/bash -c '/usr/local/bin/copilot --add-dir /tmp/gh-aw/ --log-level all --log-dir /tmp/gh-aw/sandbox/agent/logs/ --add-dir "${GITHUB_WORKSPACE}" --disable-builtin-mcps --allow-all-tools --allow-all-paths --share /tmp/gh-aw/sandbox/agent/logs/conversation.md --prompt "$(cat /tmp/gh-aw/aw-prompts/prompt.txt)"${GH_AW_MODEL_AGENT_COPILOT:+ --model "$GH_AW_MODEL_AGENT_COPILOT"}' 2>&1 | tee -a /tmp/gh-aw/agent-stdio.log
|
||||
env:
|
||||
COPILOT_AGENT_RUNNER_TYPE: STANDALONE
|
||||
COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
|
||||
|
|
@ -662,7 +670,7 @@ jobs:
|
|||
else
|
||||
echo "No session-state directory found at $SESSION_STATE_DIR"
|
||||
fi
|
||||
- name: Stop MCP gateway
|
||||
- name: Stop MCP Gateway
|
||||
if: always()
|
||||
continue-on-error: true
|
||||
env:
|
||||
|
|
@ -695,10 +703,11 @@ jobs:
|
|||
if-no-files-found: warn
|
||||
- name: Ingest agent output
|
||||
id: collect_output
|
||||
if: always()
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
|
||||
env:
|
||||
GH_AW_SAFE_OUTPUTS: ${{ env.GH_AW_SAFE_OUTPUTS }}
|
||||
GH_AW_ALLOWED_DOMAINS: "*.pythonhosted.org,anaconda.org,api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.githubcopilot.com,api.individual.githubcopilot.com,binstar.org,bootstrap.pypa.io,conda.anaconda.org,conda.binstar.org,default,files.pythonhosted.org,github.com,host.docker.internal,pip.pypa.io,pypi.org,pypi.python.org,raw.githubusercontent.com,registry.npmjs.org,repo.anaconda.com,repo.continuum.io,telemetry.enterprise.githubcopilot.com"
|
||||
GH_AW_ALLOWED_DOMAINS: "*.pythonhosted.org,anaconda.org,api.business.githubcopilot.com,api.enterprise.githubcopilot.com,api.github.com,api.githubcopilot.com,api.individual.githubcopilot.com,api.snapcraft.io,archive.ubuntu.com,azure.archive.ubuntu.com,binstar.org,bootstrap.pypa.io,conda.anaconda.org,conda.binstar.org,crl.geotrust.com,crl.globalsign.com,crl.identrust.com,crl.sectigo.com,crl.thawte.com,crl.usertrust.com,crl.verisign.com,crl3.digicert.com,crl4.digicert.com,crls.ssl.com,files.pythonhosted.org,github.com,host.docker.internal,json-schema.org,json.schemastore.org,keyserver.ubuntu.com,ocsp.digicert.com,ocsp.geotrust.com,ocsp.globalsign.com,ocsp.identrust.com,ocsp.sectigo.com,ocsp.ssl.com,ocsp.thawte.com,ocsp.usertrust.com,ocsp.verisign.com,packagecloud.io,packages.cloud.google.com,packages.microsoft.com,pip.pypa.io,ppa.launchpad.net,pypi.org,pypi.python.org,raw.githubusercontent.com,registry.npmjs.org,repo.anaconda.com,repo.continuum.io,s.symcb.com,s.symcd.com,security.ubuntu.com,telemetry.enterprise.githubcopilot.com,ts-crl.ws.symantec.com,ts-ocsp.ws.symantec.com"
|
||||
GITHUB_SERVER_URL: ${{ github.server_url }}
|
||||
GITHUB_API_URL: ${{ github.api_url }}
|
||||
with:
|
||||
|
|
@ -733,7 +742,7 @@ jobs:
|
|||
setupGlobals(core, github, context, exec, io);
|
||||
const { main } = require('/opt/gh-aw/actions/parse_copilot_log.cjs');
|
||||
await main();
|
||||
- name: Parse MCP gateway logs for step summary
|
||||
- name: Parse MCP Gateway logs for step summary
|
||||
if: always()
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
|
||||
with:
|
||||
|
|
@ -751,7 +760,12 @@ jobs:
|
|||
# Fix permissions on firewall logs so they can be uploaded as artifacts
|
||||
# AWF runs with sudo, creating files owned by root
|
||||
sudo chmod -R a+r /tmp/gh-aw/sandbox/firewall/logs 2>/dev/null || true
|
||||
awf logs summary | tee -a "$GITHUB_STEP_SUMMARY"
|
||||
# Only run awf logs summary if awf command exists (it may not be installed if workflow failed before install step)
|
||||
if command -v awf &> /dev/null; then
|
||||
awf logs summary | tee -a "$GITHUB_STEP_SUMMARY"
|
||||
else
|
||||
echo 'AWF binary not installed, skipping firewall log summary'
|
||||
fi
|
||||
- name: Upload agent artifacts
|
||||
if: always()
|
||||
continue-on-error: true
|
||||
|
|
@ -777,16 +791,14 @@ jobs:
|
|||
runs-on: ubuntu-slim
|
||||
permissions:
|
||||
contents: read
|
||||
discussions: write
|
||||
issues: write
|
||||
pull-requests: write
|
||||
outputs:
|
||||
noop_message: ${{ steps.noop.outputs.noop_message }}
|
||||
tools_reported: ${{ steps.missing_tool.outputs.tools_reported }}
|
||||
total_count: ${{ steps.missing_tool.outputs.total_count }}
|
||||
steps:
|
||||
- name: Setup Scripts
|
||||
uses: github/gh-aw/actions/setup@a0e753a02a1b3edc578b5c4c9d5d4eaf81ced5bd # v0.43.15
|
||||
uses: github/gh-aw/actions/setup@v0.45.0
|
||||
with:
|
||||
destination: /opt/gh-aw/actions
|
||||
- name: Download agent output artifact
|
||||
|
|
@ -807,8 +819,6 @@ jobs:
|
|||
GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
|
||||
GH_AW_NOOP_MAX: 1
|
||||
GH_AW_WORKFLOW_NAME: "A3 Python Code Analysis"
|
||||
GH_AW_WORKFLOW_SOURCE: "z3prover/z3/a3/a3-python-v2.md@a91c5c58bd975f336bf5b744885ffd4b36b2d2ec"
|
||||
GH_AW_WORKFLOW_SOURCE_URL: "${{ github.server_url }}/z3prover/z3/tree/a91c5c58bd975f336bf5b744885ffd4b36b2d2ec/a3/a3-python-v2.md"
|
||||
GH_AW_TRACKER_ID: "a3-python-analysis"
|
||||
with:
|
||||
github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
|
|
@ -823,8 +833,6 @@ jobs:
|
|||
env:
|
||||
GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
|
||||
GH_AW_WORKFLOW_NAME: "A3 Python Code Analysis"
|
||||
GH_AW_WORKFLOW_SOURCE: "z3prover/z3/a3/a3-python-v2.md@a91c5c58bd975f336bf5b744885ffd4b36b2d2ec"
|
||||
GH_AW_WORKFLOW_SOURCE_URL: "${{ github.server_url }}/z3prover/z3/tree/a91c5c58bd975f336bf5b744885ffd4b36b2d2ec/a3/a3-python-v2.md"
|
||||
GH_AW_TRACKER_ID: "a3-python-analysis"
|
||||
with:
|
||||
github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
|
|
@ -839,8 +847,6 @@ jobs:
|
|||
env:
|
||||
GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
|
||||
GH_AW_WORKFLOW_NAME: "A3 Python Code Analysis"
|
||||
GH_AW_WORKFLOW_SOURCE: "z3prover/z3/a3/a3-python-v2.md@a91c5c58bd975f336bf5b744885ffd4b36b2d2ec"
|
||||
GH_AW_WORKFLOW_SOURCE_URL: "${{ github.server_url }}/z3prover/z3/tree/a91c5c58bd975f336bf5b744885ffd4b36b2d2ec/a3/a3-python-v2.md"
|
||||
GH_AW_TRACKER_ID: "a3-python-analysis"
|
||||
GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
|
||||
|
|
@ -860,8 +866,6 @@ jobs:
|
|||
env:
|
||||
GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
|
||||
GH_AW_WORKFLOW_NAME: "A3 Python Code Analysis"
|
||||
GH_AW_WORKFLOW_SOURCE: "z3prover/z3/a3/a3-python-v2.md@a91c5c58bd975f336bf5b744885ffd4b36b2d2ec"
|
||||
GH_AW_WORKFLOW_SOURCE_URL: "${{ github.server_url }}/z3prover/z3/tree/a91c5c58bd975f336bf5b744885ffd4b36b2d2ec/a3/a3-python-v2.md"
|
||||
GH_AW_TRACKER_ID: "a3-python-analysis"
|
||||
GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
|
||||
|
|
@ -874,25 +878,6 @@ jobs:
|
|||
setupGlobals(core, github, context, exec, io);
|
||||
const { main } = require('/opt/gh-aw/actions/handle_noop_message.cjs');
|
||||
await main();
|
||||
- name: Update reaction comment with completion status
|
||||
id: conclusion
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8
|
||||
env:
|
||||
GH_AW_AGENT_OUTPUT: ${{ env.GH_AW_AGENT_OUTPUT }}
|
||||
GH_AW_COMMENT_ID: ${{ needs.activation.outputs.comment_id }}
|
||||
GH_AW_COMMENT_REPO: ${{ needs.activation.outputs.comment_repo }}
|
||||
GH_AW_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
GH_AW_WORKFLOW_NAME: "A3 Python Code Analysis"
|
||||
GH_AW_TRACKER_ID: "a3-python-analysis"
|
||||
GH_AW_AGENT_CONCLUSION: ${{ needs.agent.result }}
|
||||
GH_AW_DETECTION_CONCLUSION: ${{ needs.detection.result }}
|
||||
with:
|
||||
github-token: ${{ secrets.GH_AW_GITHUB_TOKEN || secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
const { setupGlobals } = require('/opt/gh-aw/actions/setup_globals.cjs');
|
||||
setupGlobals(core, github, context, exec, io);
|
||||
const { main } = require('/opt/gh-aw/actions/notify_comment_error.cjs');
|
||||
await main();
|
||||
|
||||
detection:
|
||||
needs: agent
|
||||
|
|
@ -906,7 +891,7 @@ jobs:
|
|||
success: ${{ steps.parse_results.outputs.success }}
|
||||
steps:
|
||||
- name: Setup Scripts
|
||||
uses: github/gh-aw/actions/setup@a0e753a02a1b3edc578b5c4c9d5d4eaf81ced5bd # v0.43.15
|
||||
uses: github/gh-aw/actions/setup@v0.45.0
|
||||
with:
|
||||
destination: /opt/gh-aw/actions
|
||||
- name: Download agent artifacts
|
||||
|
|
@ -948,7 +933,7 @@ jobs:
|
|||
env:
|
||||
COPILOT_GITHUB_TOKEN: ${{ secrets.COPILOT_GITHUB_TOKEN }}
|
||||
- name: Install GitHub Copilot CLI
|
||||
run: /opt/gh-aw/actions/install_copilot_cli.sh 0.0.407
|
||||
run: /opt/gh-aw/actions/install_copilot_cli.sh 0.0.410
|
||||
- name: Execute GitHub Copilot CLI
|
||||
id: agentic_execution
|
||||
# Copilot CLI tool arguments (sorted):
|
||||
|
|
@ -1010,8 +995,6 @@ jobs:
|
|||
GH_AW_TRACKER_ID: "a3-python-analysis"
|
||||
GH_AW_WORKFLOW_ID: "a3-python-v2"
|
||||
GH_AW_WORKFLOW_NAME: "A3 Python Code Analysis"
|
||||
GH_AW_WORKFLOW_SOURCE: "z3prover/z3/a3/a3-python-v2.md@a91c5c58bd975f336bf5b744885ffd4b36b2d2ec"
|
||||
GH_AW_WORKFLOW_SOURCE_URL: "${{ github.server_url }}/z3prover/z3/tree/a91c5c58bd975f336bf5b744885ffd4b36b2d2ec/a3/a3-python-v2.md"
|
||||
outputs:
|
||||
create_discussion_error_count: ${{ steps.process_safe_outputs.outputs.create_discussion_error_count }}
|
||||
create_discussion_errors: ${{ steps.process_safe_outputs.outputs.create_discussion_errors }}
|
||||
|
|
@ -1019,7 +1002,7 @@ jobs:
|
|||
process_safe_outputs_temporary_id_map: ${{ steps.process_safe_outputs.outputs.temporary_id_map }}
|
||||
steps:
|
||||
- name: Setup Scripts
|
||||
uses: github/gh-aw/actions/setup@a0e753a02a1b3edc578b5c4c9d5d4eaf81ced5bd # v0.43.15
|
||||
uses: github/gh-aw/actions/setup@v0.45.0
|
||||
with:
|
||||
destination: /opt/gh-aw/actions
|
||||
- name: Download agent output artifact
|
||||
|
|
|
|||
9  .github/workflows/a3-python-v2.md  (vendored)
|
|
@ -8,9 +8,7 @@ permissions:
|
|||
issues: read
|
||||
pull-requests: read
|
||||
network:
|
||||
allowed: [default, python]
|
||||
tools:
|
||||
serena: ["python"]
|
||||
allowed: [defaults, python]
|
||||
safe-outputs:
|
||||
create-issue:
|
||||
labels:
|
||||
|
|
@ -26,9 +24,9 @@ tracker-id: a3-python-analysis
|
|||
steps:
|
||||
- name: Checkout Python source files
|
||||
run: |
|
||||
git sparse-checkout add src
|
||||
git sparse-checkout init --cone
|
||||
git sparse-checkout set src
|
||||
echo "Source files checked out for Python analysis"
|
||||
source: z3prover/z3/a3/a3-python-v2.md@a91c5c58bd975f336bf5b744885ffd4b36b2d2ec
|
||||
---
|
||||
|
||||
# A3 Python Code Analysis Agent
|
||||
|
|
@ -38,7 +36,6 @@ You are an expert Python code analyst using the a3-python tool to identify bugs
|
|||
## Current Context
|
||||
|
||||
- **Repository**: ${{ github.repository }}
|
||||
- **Analysis Date**: $(date +%Y-%m-%d)
|
||||
- **Workspace**: ${{ github.workspace }}
|
||||
|
||||
## Phase 1: Install and Setup a3-python
|
||||
|
|
|
|||
3  .github/workflows/a3-python.lock.yml  (generated, vendored)
|
|
@ -98,7 +98,8 @@ jobs:
|
|||
run: bash /opt/gh-aw/actions/create_gh_aw_tmp_dir.sh
|
||||
- name: Checkout Python source files
|
||||
run: |-
|
||||
git sparse-checkout add src
|
||||
git sparse-checkout init --cone
|
||||
git sparse-checkout set src
|
||||
echo "Python source files checked out from src directory"
|
||||
|
||||
- name: Configure Git credentials
|
||||
|
|
|
|||
4  .github/workflows/a3-python.md  (vendored)
|
|
@ -24,7 +24,8 @@ tracker-id: a3-python-analysis
|
|||
steps:
|
||||
- name: Checkout Python source files
|
||||
run: |
|
||||
git sparse-checkout add src
|
||||
git sparse-checkout init --cone
|
||||
git sparse-checkout set src
|
||||
echo "Python source files checked out from src directory"
|
||||
---
|
||||
|
||||
|
|
@ -35,7 +36,6 @@ You are an expert Python code analyst using the a3-python tool to identify bugs
|
|||
## Current Context
|
||||
|
||||
- **Repository**: ${{ github.repository }}
|
||||
- **Analysis Date**: $(date +%Y-%m-%d)
|
||||
- **Workspace**: ${{ github.workspace }}
|
||||
|
||||
## Phase 1: Install and Setup a3-python
|
||||
|
|
|
|||
1  .github/workflows/code-simplifier.md  (vendored)
|
|
@ -39,7 +39,6 @@ Analyze recently modified code from the last 24 hours and apply refinements that
|
|||
## Current Context
|
||||
|
||||
- **Repository**: ${{ github.repository }}
|
||||
- **Analysis Date**: $(date +%Y-%m-%d)
|
||||
- **Workspace**: ${{ github.workspace }}
|
||||
|
||||
## Phase 1: Identify Recently Modified Code
|
||||
|
|
|
|||
|
|
@ -1,3 +0,0 @@
|
|||
**/genaiscript.d.ts
|
||||
**/package-lock.json
|
||||
**/yarn.lock
|
||||
|
|
@ -1,287 +0,0 @@
|
|||
# RCF API Implementation Summary
|
||||
|
||||
## Overview
|
||||
|
||||
This document summarizes the implementation of RCF (Real Closed Field) bindings across multiple Z3 language APIs, addressing the #1 critical gap identified in [GitHub Discussion #8170](https://github.com/Z3Prover/z3/discussions/8170).
|
||||
|
||||
## What is RCF?

The Real Closed Field (RCF) API provides exact real arithmetic capabilities including:
- **Transcendental numbers**: π (pi), e (Euler's number)
- **Algebraic numbers**: Roots of polynomials with exact representation
- **Infinitesimals**: Numbers smaller than any positive real number
- **Rational numbers**: Exact fraction arithmetic

The RCF API is useful for symbolic mathematics, exact real arithmetic, and problems requiring precise numerical representations beyond floating-point arithmetic.
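As a quick illustration of what these capabilities look like from client code, here is a minimal sketch using the Python reference bindings described later in this document. It assumes the helpers listed for `src/api/python/z3/z3rcf.py` (`Pi`, `E`, `MkInfinitesimal`, `MkRoots`, and the `RCFNum` class) are importable as `z3.z3rcf`, and that `MkRoots` takes polynomial coefficients with the constant term first; both points are assumptions rather than facts confirmed by this summary.

```python
# Minimal sketch of the RCF API via the Python reference bindings.
# Assumptions: the helpers live in z3.z3rcf, and MkRoots takes coefficients
# ordered as p[0] + p[1]*x + p[2]*x^2 + ...
from z3.z3rcf import RCFNum, Pi, E, MkInfinitesimal, MkRoots

pi, e = Pi(), E()
print("pi + e =", pi + e)            # exact transcendental arithmetic
print("pi * e =", pi * e)

two = RCFNum(2)
roots = MkRoots([-2, 0, 1])          # roots of x^2 - 2 = 0
for r in roots:
    print("root:", r, "squares to 2:", r * r == two)

eps = MkInfinitesimal("eps")         # smaller than any positive real
print("0 < eps < 2:", RCFNum(0) < eps and eps < two)
```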
|
||||
|
||||
## C API Foundation
|
||||
|
||||
The core C API is already complete and well-established:
|
||||
- **Header**: `src/api/z3_rcf.h` (321 lines)
|
||||
- **Implementation**: `src/api/api_rcf.cpp` (440 lines)
|
||||
- **Type definition**: `def_Type('RCF_NUM', 'Z3_rcf_num', 'RCFNumObj')` in `z3_api.h`
|
||||
- **Functions**: 31 C API functions for creation, arithmetic, comparison, introspection
|
||||
|
||||
All language bindings build on top of this existing C API.
|
||||
|
||||
## Language Implementations
|
||||
|
||||
### 1. C++ (`src/api/c++/z3++.h`)
|
||||
|
||||
**Status**: ✅ Complete (New Implementation)
|
||||
|
||||
**Changes**:
|
||||
- Added `#include<z3_rcf.h>` to imports
|
||||
- Added `rcf_num` class (230 lines) before closing namespace
|
||||
- Added helper functions: `rcf_pi()`, `rcf_e()`, `rcf_infinitesimal()`, `rcf_roots()`
|
||||
|
||||
**Features**:
|
||||
- RAII memory management (automatic Z3_rcf_del in destructor)
|
||||
- Full operator overloading: `+`, `-`, `*`, `/`, `==`, `!=`, `<`, `>`, `<=`, `>=`
|
||||
- Copy constructor and assignment operator
|
||||
- String conversion: `to_string()`, `to_decimal()`
|
||||
- Type queries: `is_rational()`, `is_algebraic()`, `is_infinitesimal()`, `is_transcendental()`
|
||||
- Arithmetic: `power()`, `inv()` (inverse)
|
||||
- Root finding: `rcf_roots(ctx, coeffs_vector)`
|
||||
|
||||
**Example**: `examples/c++/rcf_example.cpp` (130 lines)
|
||||
|
||||
**Build Integration**: No changes needed - automatically included via z3++.h
|
||||
|
||||
### 2. Java (`src/api/java/RCFNum.java`)
|
||||
|
||||
**Status**: ✅ Complete (New Implementation)
|
||||
|
||||
**New Files**:
|
||||
- `src/api/java/RCFNum.java` (390 lines)
|
||||
|
||||
**Features**:
|
||||
- Extends `Z3Object` for reference counting integration
|
||||
- Factory methods: `mkPi()`, `mkE()`, `mkInfinitesimal()`, `mkRoots()`
|
||||
- Arithmetic: `add()`, `sub()`, `mul()`, `div()`, `neg()`, `inv()`, `power()`
|
||||
- Comparisons: `lt()`, `gt()`, `le()`, `ge()`, `eq()`, `neq()`
|
||||
- Type queries: `isRational()`, `isAlgebraic()`, `isInfinitesimal()`, `isTranscendental()`
|
||||
- String conversion: `toString()`, `toString(boolean compact)`, `toDecimal(int precision)`
|
||||
- Automatic memory management via `Z3ReferenceQueue` and `RCFNumRef` inner class
|
||||
|
||||
**Example**: `examples/java/RCFExample.java` (135 lines)
|
||||
|
||||
**Build Integration**: No changes needed - automatically compiled with Java bindings
|
||||
|
||||
**Native Methods**: Automatically generated by `scripts/update_api.py` in `Native.java`:
|
||||
- `rcfMkRational()`, `rcfMkSmallInt()`, `rcfMkPi()`, `rcfMkE()`, `rcfMkInfinitesimal()`
|
||||
- `rcfMkRoots()`, `rcfAdd()`, `rcfSub()`, `rcfMul()`, `rcfDiv()`, `rcfNeg()`, `rcfInv()`, `rcfPower()`
|
||||
- `rcfLt()`, `rcfGt()`, `rcfLe()`, `rcfGe()`, `rcfEq()`, `rcfNeq()`
|
||||
- `rcfNumToString()`, `rcfNumToDecimalString()`
|
||||
- `rcfIsRational()`, `rcfIsAlgebraic()`, `rcfIsInfinitesimal()`, `rcfIsTranscendental()`
|
||||
- `rcfDel()`
|
||||
|
||||
### 3. C# / .NET (`src/api/dotnet/RCFNum.cs`)
|
||||
|
||||
**Status**: ✅ Complete (New Implementation)
|
||||
|
||||
**New Files**:
|
||||
- `src/api/dotnet/RCFNum.cs` (480 lines)
|
||||
|
||||
**Features**:
|
||||
- Extends `Z3Object` with `IDisposable` pattern
|
||||
- Factory methods: `MkPi()`, `MkE()`, `MkInfinitesimal()`, `MkRoots()`
|
||||
- Arithmetic: `Add()`, `Sub()`, `Mul()`, `Div()`, `Neg()`, `Inv()`, `Power()`
|
||||
- Full operator overloading: `+`, `-`, `*`, `/`, `==`, `!=`, `<`, `>`, `<=`, `>=`
|
||||
- Comparisons: `Lt()`, `Gt()`, `Le()`, `Ge()`, `Eq()`, `Neq()`
|
||||
- Type queries: `IsRational()`, `IsAlgebraic()`, `IsInfinitesimal()`, `IsTranscendental()`
|
||||
- String conversion: `ToString()`, `ToString(bool compact)`, `ToDecimal(uint precision)`
|
||||
- Overrides: `Equals()`, `GetHashCode()` for proper equality semantics
|
||||
|
||||
**Example**: `examples/dotnet/RCFExample.cs` (130 lines)
|
||||
|
||||
**Build Integration**: No changes needed - automatically compiled with .NET bindings
|
||||
|
||||
**Native Methods**: Automatically generated by `scripts/update_api.py` in `Native.cs`:
|
||||
- Same methods as Java, using .NET P/Invoke conventions
|
||||
- `Z3_rcf_*` C functions wrapped with appropriate marshalling
|
||||
|
||||
### 4. TypeScript / JavaScript
|
||||
|
||||
**Status**: ✅ Complete (Already Working - Documented)
|
||||
|
||||
**Existing Support**:
|
||||
- `z3_rcf.h` is already in parse list (`src/api/js/scripts/parse-api.ts` line 13)
|
||||
- All 31 RCF C API functions automatically generated as low-level bindings
|
||||
- TypeScript bindings auto-generated from C API headers
|
||||
|
||||
**Functions Available** (via low-level API):
|
||||
- `Z3.rcf_mk_rational()`, `Z3.rcf_mk_small_int()`, `Z3.rcf_mk_pi()`, `Z3.rcf_mk_e()`, `Z3.rcf_mk_infinitesimal()`
|
||||
- `Z3.rcf_mk_roots()`, `Z3.rcf_add()`, `Z3.rcf_sub()`, `Z3.rcf_mul()`, `Z3.rcf_div()`, `Z3.rcf_neg()`, `Z3.rcf_inv()`, `Z3.rcf_power()`
|
||||
- `Z3.rcf_lt()`, `Z3.rcf_gt()`, `Z3.rcf_le()`, `Z3.rcf_ge()`, `Z3.rcf_eq()`, `Z3.rcf_neq()`
|
||||
- `Z3.rcf_num_to_string()`, `Z3.rcf_num_to_decimal_string()`
|
||||
- `Z3.rcf_is_rational()`, `Z3.rcf_is_algebraic()`, `Z3.rcf_is_infinitesimal()`, `Z3.rcf_is_transcendental()`
|
||||
- `Z3.rcf_del()`
|
||||
|
||||
**Example**: `src/api/js/examples/low-level/rcf-example.ts` (165 lines)
|
||||
|
||||
**Note**: No high-level wrapper needed - low-level API is sufficient and matches Python's ctypes-style usage.
|
||||
|
||||
### 5. Python
|
||||
|
||||
**Status**: ✅ Already Complete (Reference Implementation)
|
||||
|
||||
**Existing Files**:
|
||||
- `src/api/python/z3/z3rcf.py` (complete implementation)
|
||||
- High-level `RCFNum` class with operator overloading
|
||||
- Helper functions: `Pi()`, `E()`, `MkInfinitesimal()`, `MkRoots()`
|
||||
|
||||
**Reference**: This implementation served as the design reference for other languages.
|
||||
|
||||
### 6. OCaml
|
||||
|
||||
**Status**: ⚠️ Not Verified
|
||||
|
||||
**Notes**:
|
||||
- OCaml bindings in `src/api/ml/` were not modified
|
||||
- The coherence checker showed OCaml has 95.5% coverage with "zero missing features"
|
||||
- RCF support status in OCaml needs separate verification
|
||||
- May already be complete through automatic C API bindings
|
||||
|
||||
## Example Output

All examples demonstrate the same four scenarios:

### 1. Basic Example (Pi and E)
```
pi = 3.1415926535897...
e = 2.7182818284590...
pi + e = 5.8598744820487...
pi * e = 8.5397342226735...
```

### 2. Rational Example
```
1/2 = 1/2
1/3 = 1/3
1/2 + 1/3 = 5/6
Is 1/2 rational? yes
```

### 3. Roots Example (sqrt(2))
```
Roots of x^2 - 2 = 0:
root[0] = -1.4142135623730...
root[1] = 1.4142135623730...
is_algebraic = yes
```

### 4. Infinitesimal Example
```
eps = epsilon
Is eps infinitesimal? yes
eps < 1/1000000000? yes
```
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Build Testing
|
||||
1. **C++**: Build Z3, compile and run `build/examples/c++/rcf_example`
|
||||
2. **Java**: Build with `--java` flag, compile and run `RCFExample.java`
|
||||
3. **C#**: Build with `--dotnet` flag, compile and run `RCFExample.cs`
|
||||
4. **TypeScript**: Install z3-solver npm package, run with `ts-node rcf-example.ts`
|
||||
|
||||
### Unit Testing
The implementations should be tested with the following (a minimal sketch appears after this list):
- Basic arithmetic operations
- Comparison operations
- Type queries (rational, algebraic, infinitesimal, transcendental)
- Polynomial root finding
- Memory management (no leaks)
- Cross-context error handling
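A minimal pytest-style sketch of the arithmetic, comparison, and root-finding checks, again assuming the Python helper names listed for `z3rcf.py` (`RCFNum`, `Pi`, `MkRoots`, `MkInfinitesimal`) and constant-term-first coefficients for `MkRoots`; the memory-management and cross-context checks are binding-specific and omitted here.

```python
# Hypothetical unit-test sketch for the RCF bindings (Python reference API).
from z3.z3rcf import RCFNum, Pi, MkRoots, MkInfinitesimal

def test_arithmetic_and_comparison():
    two, three = RCFNum(2), RCFNum(3)
    assert two + three == RCFNum(5)       # exact addition
    assert two * three == RCFNum(6)       # exact multiplication
    assert two < three and not (three < two)
    assert RCFNum(0) < Pi()               # pi is positive

def test_polynomial_roots():
    roots = MkRoots([-2, 0, 1])           # x^2 - 2 = 0
    assert len(roots) == 2
    for r in roots:
        assert r * r == RCFNum(2)         # each root squares to exactly 2

def test_infinitesimal_ordering():
    eps = MkInfinitesimal("eps")
    assert RCFNum(0) < eps < RCFNum(1)    # smaller than any positive rational
```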
|
||||
|
||||
### Integration Testing
|
||||
- Use RCF numerals in actual Z3 solving scenarios
|
||||
- Verify decimal approximations are accurate
|
||||
- Test edge cases (division by zero, empty polynomial coefficients)
|
||||
|
||||
## Design Decisions
|
||||
|
||||
### Memory Management
|
||||
- **C++**: RAII with destructor calling `Z3_rcf_del`
|
||||
- **Java**: Reference queue with `RCFNumRef` finalizer
|
||||
- **C#**: `IDisposable` pattern with `DecRef` override
|
||||
- **TypeScript**: Manual `Z3.rcf_del()` calls (matches low-level API style)
|
||||
|
||||
### API Style
|
||||
- **C++**: Lowercase with underscores (STL style), operator overloading
|
||||
- **Java**: CamelCase methods, factory methods for constants
|
||||
- **C#**: PascalCase methods, operator overloading, factory methods
|
||||
- **TypeScript**: Snake_case C API functions directly
|
||||
|
||||
### Error Handling
|
||||
- All implementations validate context matching between operations
|
||||
- C++ and C# use exceptions
|
||||
- Java uses `Z3Exception`
|
||||
- TypeScript relies on C API error handlers
|
||||
|
||||
## Integration Points
|
||||
|
||||
### No Build System Changes Required
|
||||
All implementations integrate seamlessly:
|
||||
- C++ is header-only (included in z3++.h)
|
||||
- Java auto-compiles with existing Java bindings
|
||||
- C# auto-compiles with existing .NET bindings
|
||||
- TypeScript auto-generates from headers
|
||||
|
||||
### No API Generation Changes Required
|
||||
The `scripts/update_api.py` already:
|
||||
- Parses `z3_rcf.h` (via `def_API` macros)
|
||||
- Generates Java `Native.java` methods
|
||||
- Generates C# `Native.cs` methods
|
||||
- Generates TypeScript type definitions
|
||||
|
||||
### Documentation
|
||||
- Examples serve as primary documentation
|
||||
- Each class has comprehensive doc comments
|
||||
- Public methods include parameter descriptions
|
||||
- Examples show realistic usage patterns
|
||||
|
||||
## API Coverage Summary

| Language | Before | After | Functions | Lines of Code | Status |
|----------|--------|-------|-----------|---------------|--------|
| **C API** | 100% | 100% | 31 | 761 | ✅ (Existing) |
| **C++** | 0% | 100% | 31 | ~250 | ✅ (New) |
| **Java** | 0% | 100% | 31 | ~390 | ✅ (New) |
| **C# (.NET)** | 0% | 100% | 31 | ~480 | ✅ (New) |
| **TypeScript/JS** | 100% | 100% | 31 | ~165 (example) | ✅ (Documented) |
| **Python** | 100% | 100% | 38 | ~300 | ✅ (Existing) |
| **OCaml** | Unknown | Unknown | ? | ? | ⚠️ (Not Verified) |
|
||||
|
||||
**Total New Code**: ~1,285 lines across 3 languages + 595 lines of examples
|
||||
|
||||
## Future Work
|
||||
|
||||
### Potential Enhancements
|
||||
1. **OCaml Verification**: Confirm RCF support in OCaml bindings
|
||||
2. **High-level TypeScript API**: Create optional `RCFNum` class wrapper for type safety
|
||||
3. **Additional Tests**: Unit tests for each language
|
||||
4. **Performance Benchmarks**: Compare RCF vs floating-point for precision-critical computations
|
||||
5. **Documentation**: Add RCF section to Z3 guide with theory background
|
||||
|
||||
### Other API Gaps
|
||||
This PR addresses the #1 critical gap. According to discussion #8170, other gaps include:
|
||||
- **TypeScript FPA API** (81 functions) - #2 priority
|
||||
- **TypeScript String API** (28 functions) - #3 priority
|
||||
- **Statistics API** in TypeScript (9 functions)
|
||||
- **Print mode control** in Python, C#, TypeScript
|
||||
|
||||
## References
|
||||
|
||||
- **GitHub Discussion**: [#8170 - API Coherence Analysis](https://github.com/Z3Prover/z3/discussions/8170)
|
||||
- **C API Header**: `src/api/z3_rcf.h`
|
||||
- **C Implementation**: `src/api/api_rcf.cpp`
|
||||
- **Python Reference**: `src/api/python/z3/z3rcf.py`
|
||||
- **Realclosure Module**: `src/math/realclosure/realclosure.h` (underlying implementation)
|
||||
|
||||
## Conclusion
|
||||
|
||||
This implementation successfully adds comprehensive RCF support to 3 languages (C++, Java, C#) where it was completely missing, and documents the existing TypeScript support. The implementations follow established patterns in each language, integrate seamlessly with existing build systems, and provide identical functionality across all platforms.
|
||||
|
||||
The RCF API enables Z3 users to perform exact real arithmetic with transcendental and algebraic numbers, filling a critical gap identified by the API coherence analysis.
|
||||
264  SPECBOT.md
|
|
@ -1,264 +0,0 @@
|
|||
# SpecBot: Automatic Specification Mining Agent
|
||||
|
||||
SpecBot is a GitHub Agentic Workflow that automatically annotates Z3 source code with formal specifications using LLM-based invariant synthesis.
|
||||
|
||||
## Overview
|
||||
|
||||
SpecBot analyzes C++ classes in the Z3 theorem prover codebase and automatically adds:
|
||||
- **Class Invariants**: Properties that must always hold for all instances of a class
|
||||
- **Pre-conditions**: Conditions required before a function executes
|
||||
- **Post-conditions**: Guarantees about function results and side effects
|
||||
|
||||
This approach is inspired by the paper ["Classinvgen: Class invariant synthesis using large language models"](https://arxiv.org/abs/2502.18917).
|
||||
|
||||
## What It Does
|
||||
|
||||
### Automatic Specification Mining
|
||||
|
||||
SpecBot uses LLM reasoning to:
|
||||
1. **Identify target classes** with complex state management
|
||||
2. **Analyze code structure** including members, methods, and dependencies
|
||||
3. **Mine specifications** using multi-step reasoning about code semantics
|
||||
4. **Generate annotations** using Z3's existing assertion macros (`SASSERT`, `ENSURE`, `VERIFY`)
|
||||
5. **Create discussions** documenting the proposed specifications for human review and implementation
|
||||
|
||||
### Example Annotations
|
||||
|
||||
**Class Invariant:**
|
||||
```cpp
|
||||
class vector {
|
||||
private:
|
||||
void check_invariant() const {
|
||||
SASSERT(m_size <= m_capacity);
|
||||
SASSERT(m_data != nullptr || m_capacity == 0);
|
||||
}
|
||||
public:
|
||||
void push_back(int x) {
|
||||
check_invariant(); // Verify invariant
|
||||
// ... implementation
|
||||
check_invariant(); // Preserve invariant
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
**Pre-condition:**
|
||||
```cpp
|
||||
void set_value(int index, int value) {
|
||||
SASSERT(index >= 0); // Pre-condition
|
||||
SASSERT(index < m_size); // Pre-condition
|
||||
// ... implementation
|
||||
}
|
||||
```
|
||||
|
||||
**Post-condition:**
|
||||
```cpp
|
||||
int* allocate_buffer(size_t size) {
|
||||
SASSERT(size > 0); // Pre-condition
|
||||
int* result = new int[size];
|
||||
SASSERT(result != nullptr); // Post-condition
|
||||
return result;
|
||||
}
|
||||
```
|
||||
|
||||
## Triggers
|
||||
|
||||
### 1. Weekly Schedule
|
||||
- Automatically runs every week
|
||||
- Randomly selects 3-5 core classes for analysis
|
||||
- Focuses on high-impact components (AST, solvers, data structures)
|
||||
|
||||
### 2. Manual Trigger (workflow_dispatch)
|
||||
You can manually trigger SpecBot with optional parameters:
|
||||
- **target_path**: Specific directory or file (e.g., `src/ast/`, `src/smt/smt_context.cpp`)
|
||||
- **target_class**: Specific class name to analyze
|
||||
|
||||
To trigger manually:
|
||||
```bash
|
||||
# Analyze a specific directory
|
||||
gh workflow run specbot.lock.yml -f target_path=src/ast/
|
||||
|
||||
# Analyze a specific file
|
||||
gh workflow run specbot.lock.yml -f target_path=src/smt/smt_context.cpp
|
||||
|
||||
# Analyze a specific class
|
||||
gh workflow run specbot.lock.yml -f target_class=ast_manager
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Workflow Files
|
||||
- **`.github/workflows/specbot.md`**: Workflow definition (compile this to update)
|
||||
- **`.github/agentics/specbot.md`**: Agent prompt (edit without recompilation!)
|
||||
- **`.github/workflows/specbot.lock.yml`**: Compiled workflow (auto-generated)
|
||||
|
||||
### Key Settings
|
||||
- **Schedule**: Weekly (fuzzy scheduling to distribute load)
|
||||
- **Timeout**: 45 minutes
|
||||
- **Permissions**: Read-only (contents, issues, pull-requests)
|
||||
- **Tools**: GitHub API, bash, file operations (view, glob, grep, edit)
|
||||
- **Safe Outputs**: Creates pull requests, reports missing tools as issues
|
||||
|
||||
## Methodology

SpecBot follows a systematic approach to specification mining:

### 1. Class Selection
- Prioritizes classes with multiple data members and complex state
- Focuses on public/protected methods needing contracts
- Skips simple POD structs and well-annotated code (one possible selection heuristic is sketched below)
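To make the selection step concrete, here is a hypothetical, language-agnostic sketch (written in Python) of the kind of pre-filter an agent could apply before asking the LLM to analyze a class; the member-count threshold, the regexes, and the `src/ast` path are illustrative assumptions, not part of SpecBot itself.

```python
# Hypothetical pre-filter for candidate C++ classes: count declared data
# members in a header and keep classes with enough state to be interesting.
import re
from pathlib import Path

CLASS_RE = re.compile(r"\bclass\s+(\w+)")
MEMBER_RE = re.compile(r"^\s+[\w:<>,\s\*&]+\s+m_\w+\s*(=[^;]*)?;", re.M)

def candidate_classes(header: Path, min_members: int = 3):
    text = header.read_text(errors="ignore")
    # Very rough: split the file at each class declaration and count
    # Z3-style "m_" data members in the chunk that follows it.
    chunks = CLASS_RE.split(text)
    for name, body in zip(chunks[1::2], chunks[2::2]):
        if len(MEMBER_RE.findall(body)) >= min_members:
            yield name

if __name__ == "__main__":
    for h in Path("src/ast").glob("*.h"):
        for cls in candidate_classes(h):
            print(f"{h}: {cls}")
```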
|
||||
|
||||
### 2. Code Analysis
|
||||
- Parses header (.h) and implementation (.cpp) files
|
||||
- Maps member variables, methods, and constructors
|
||||
- Identifies resource management patterns
|
||||
|
||||
### 3. Specification Synthesis
|
||||
Uses LLM reasoning to infer:
|
||||
- **Invariants**: From member relationships, constructors, and state-modifying methods
|
||||
- **Pre-conditions**: From argument constraints and defensive code patterns
|
||||
- **Post-conditions**: From return value properties and guaranteed side effects
|
||||
|
||||
### 4. Annotation Generation
|
||||
- Uses Z3's existing assertion macros
|
||||
- Adds explanatory comments for complex invariants
|
||||
- Follows Z3's coding conventions
|
||||
- Guards expensive checks with `DEBUG` macros
|
||||
|
||||
### 5. Discussion Creation
|
||||
Creates a discussion in the "Agentic Workflows" category with:
|
||||
- Detailed description of specifications identified
|
||||
- Rationale for each assertion
|
||||
- Human review and implementation recommendations
|
||||
|
||||
## Best Practices
|
||||
|
||||
### What SpecBot Does Well ✅
|
||||
- Identifies non-trivial invariants (not just null checks)
|
||||
- Respects Z3's coding conventions
|
||||
- Uses existing helper methods (e.g., `well_formed()`, `is_valid()`)
|
||||
- Groups related assertions logically
|
||||
- Considers performance impact
|
||||
|
||||
### What SpecBot Avoids ❌
|
||||
- Trivial assertions that add no value
|
||||
- Assertions with side effects
|
||||
- Expensive checks without DEBUG guards
|
||||
- Duplicating existing assertions
|
||||
- Changing any program behavior
|
||||
|
||||
## Human Review Required
|
||||
|
||||
SpecBot is a **specification synthesis assistant**, not a replacement for human expertise:
|
||||
- **Review all proposed assertions** for correctness
|
||||
- **Validate complex invariants** against code semantics
|
||||
- **Check performance impact** of assertion checks
|
||||
- **Refine specifications** based on domain knowledge
|
||||
- **Implement changes manually** after review
|
||||
- **Test changes** before applying to the codebase
|
||||
|
||||
LLMs can occasionally hallucinate or miss nuances, so human oversight is essential.
|
||||
|
||||
## Output Format
|
||||
|
||||
### Discussion Structure
|
||||
```markdown
|
||||
## ✨ Automatic Specification Mining
|
||||
|
||||
### 📋 Classes Annotated
|
||||
- `ClassName` in `src/path/to/file.cpp`
|
||||
|
||||
### 🔍 Specifications Added
|
||||
|
||||
#### Class Invariants
|
||||
- **Invariant**: [description]
|
||||
- **Assertion**: `SASSERT([expression])`
|
||||
- **Rationale**: [why this invariant is important]
|
||||
|
||||
#### Pre-conditions
|
||||
- **Method**: `method_name()`
|
||||
- **Pre-condition**: [description]
|
||||
- **Assertion**: `SASSERT([expression])`
|
||||
|
||||
#### Post-conditions
|
||||
- **Method**: `method_name()`
|
||||
- **Post-condition**: [description]
|
||||
- **Assertion**: `SASSERT([expression])`
|
||||
|
||||
### 🎯 Goals Achieved
|
||||
- ✅ Improved code documentation
|
||||
- ✅ Early bug detection through runtime checks
|
||||
- ✅ Better understanding of class contracts
|
||||
|
||||
*🤖 Generated by SpecBot - Automatic Specification Mining Agent*
|
||||
```
|
||||
|
||||
## Editing the Agent
|
||||
|
||||
### Without Recompilation (Recommended)
|
||||
Edit `.github/agentics/specbot.md` to modify:
|
||||
- Agent instructions and guidelines
|
||||
- Specification synthesis strategies
|
||||
- Discussion output formatting
|
||||
- Error handling behavior
|
||||
|
||||
Changes take effect immediately on the next run.
|
||||
|
||||
### With Recompilation (For Config Changes)
|
||||
Edit `.github/workflows/specbot.md` and run:
|
||||
```bash
|
||||
gh aw compile specbot
|
||||
```
|
||||
|
||||
Recompilation is needed for:
|
||||
- Changing triggers (schedule, workflow_dispatch)
|
||||
- Modifying permissions or tools
|
||||
- Adjusting timeout or safe outputs (e.g., switching from PR to Discussion)
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Workflow Not Running
|
||||
- Check that the compiled `.lock.yml` file is committed
|
||||
- Verify the workflow is enabled in repository settings
|
||||
- Review GitHub Actions logs for errors
|
||||
|
||||
### No Specifications Generated
|
||||
- The selected classes may already be well-annotated
|
||||
- Code may be too complex for confident specification synthesis
|
||||
- Check workflow logs for analysis details
|
||||
|
||||
### Compilation Errors
|
||||
If assertions cause build errors:
|
||||
- Review assertion syntax and Z3 macro usage
|
||||
- Verify that assertions don't access invalid members
|
||||
- Check that expressions are well-formed
|
||||
|
||||
## Benefits
|
||||
|
||||
### For Developers
|
||||
- **Documentation**: Specifications serve as precise documentation
|
||||
- **Bug Detection**: Runtime assertions catch violations early
|
||||
- **Understanding**: Clear contracts improve code comprehension
|
||||
- **Maintenance**: Invariants help prevent bugs during refactoring
|
||||
|
||||
### For Verification
|
||||
- **Foundation**: Specifications enable formal verification
|
||||
- **Testing**: Assertions strengthen test coverage
|
||||
- **Debugging**: Contract violations pinpoint error locations
|
||||
- **Confidence**: Specifications increase correctness confidence
|
||||
|
||||
## References
|
||||
|
||||
- **Paper**: [Classinvgen: Class invariant synthesis using large language models (arXiv:2502.18917)](https://arxiv.org/abs/2502.18917)
|
||||
- **Approach**: LLM-based specification mining for object-oriented code
|
||||
- **Related**: Design by Contract, Programming by Contract (Bertrand Meyer)
|
||||
|
||||
## Contributing
|
||||
|
||||
To improve SpecBot:
|
||||
1. Edit `.github/agentics/specbot.md` for prompt improvements
|
||||
2. Provide feedback on generated specifications via PR reviews
|
||||
3. Report issues or suggest enhancements through GitHub issues
|
||||
|
||||
## License
|
||||
|
||||
SpecBot is part of the Z3 theorem prover project and follows the same license (MIT).
|
||||
|
|
@ -1,316 +0,0 @@
|
|||
# ============================================================================
|
||||
# DEPRECATION NOTICE
|
||||
# ============================================================================
|
||||
# This Azure Pipelines configuration has been migrated to GitHub Actions.
|
||||
# See .github/workflows/ci.yml for the new CI pipeline.
|
||||
# See .github/workflows/CI_MIGRATION.md for migration details.
|
||||
#
|
||||
# This file is kept for reference and may be removed in the future.
|
||||
# ============================================================================
|
||||
|
||||
variables:
|
||||
cmakeJulia: '-DZ3_BUILD_JULIA_BINDINGS=True'
|
||||
cmakeJava: '-DZ3_BUILD_JAVA_BINDINGS=True'
|
||||
cmakeNet: '-DZ3_BUILD_DOTNET_BINDINGS=True'
|
||||
cmakePy: '-DZ3_BUILD_PYTHON_BINDINGS=True'
|
||||
cmakeStdArgs: '-DZ3_BUILD_DOTNET_BINDINGS=True -DZ3_BUILD_JAVA_BINDINGS=True -DZ3_BUILD_PYTHON_BINDINGS=True -G "Ninja" ../'
|
||||
asanEnv: 'CXXFLAGS="${CXXFLAGS} -fsanitize=address -fno-omit-frame-pointer" CFLAGS="${CFLAGS} -fsanitize=address -fno-omit-frame-pointer"'
|
||||
ubsanEnv: 'CXXFLAGS="${CXXFLAGS} -fsanitize=undefined" CFLAGS="${CFLAGS} -fsanitize=undefined"'
|
||||
msanEnv: 'CC=clang LDFLAGS="-L../libcxx/libcxx_msan/lib -lc++abi -Wl,-rpath=../libcxx/libcxx_msan/lib" CXX=clang++ CXXFLAGS="${CXXFLAGS} -stdlib=libc++ -fsanitize-memory-track-origins -fsanitize=memory -fPIE -fno-omit-frame-pointer -g -O2" CFLAGS="${CFLAGS} -stdlib=libc -fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer -g -O2"'
|
||||
|
||||
|
||||
# TBD:
|
||||
# test python bindings
|
||||
# build documentation
|
||||
# Asan, ubsan, msan
|
||||
# Disabled pending clang dependencies for std::unordered_map
|
||||
|
||||
jobs:
|
||||
|
||||
- job: "LinuxPythonDebug"
|
||||
displayName: "Ubuntu build - python make - debug"
|
||||
timeoutInMinutes: 90
|
||||
pool:
|
||||
vmImage: "ubuntu-latest"
|
||||
strategy:
|
||||
matrix:
|
||||
MT:
|
||||
cmdLine: 'python scripts/mk_make.py -d --java --dotnet'
|
||||
runRegressions: 'True'
|
||||
ST:
|
||||
cmdLine: './configure --single-threaded'
|
||||
runRegressions: 'False'
|
||||
steps:
|
||||
- script: $(cmdLine)
|
||||
- script: |
|
||||
set -e
|
||||
cd build
|
||||
make -j3
|
||||
make -j3 examples
|
||||
make -j3 test-z3
|
||||
cd ..
|
||||
- template: scripts/test-z3.yml
|
||||
- ${{if eq(variables['runRegressions'], 'True')}}:
|
||||
- template: scripts/test-regressions.yml
|
||||
|
||||
- job: "ManylinuxPythonBuildAmd64"
|
||||
displayName: "Python bindings (manylinux Centos AMD64) build"
|
||||
timeoutInMinutes: 90
|
||||
pool:
|
||||
vmImage: "ubuntu-latest"
|
||||
container: "quay.io/pypa/manylinux_2_34_x86_64:latest"
|
||||
condition: eq(1,1)
|
||||
steps:
|
||||
- script: "/opt/python/cp38-cp38/bin/python -m venv $PWD/env"
|
||||
- script: 'echo "##vso[task.prependpath]$PWD/env/bin"'
|
||||
- script: "pip install build git+https://github.com/rhelmot/auditwheel"
|
||||
- script: "cd src/api/python && python -m build && AUDITWHEEL_PLAT= auditwheel repair --best-plat dist/*.whl && cd ../../.."
|
||||
- script: "pip install ./src/api/python/wheelhouse/*.whl && python - <src/api/python/z3test.py z3 && python - <src/api/python/z3test.py z3num"
|
||||
|
||||
- job: ManyLinuxPythonBuildArm64
|
||||
timeoutInMinutes: 90
|
||||
displayName: "Python bindings (manylinux Centos ARM64 cross) build"
|
||||
variables:
|
||||
name: ManyLinux
|
||||
python: "/opt/python/cp37-cp37m/bin/python"
|
||||
pool:
|
||||
vmImage: "ubuntu-latest"
|
||||
container: "quay.io/pypa/manylinux2014_x86_64:latest"
|
||||
steps:
|
||||
- script: curl -L -o /tmp/arm-toolchain.tar.xz 'https://developer.arm.com/-/media/Files/downloads/gnu/13.3.rel1/binrel/arm-gnu-toolchain-13.3.rel1-x86_64-aarch64-none-linux-gnu.tar.xz'
|
||||
- script: mkdir -p /tmp/arm-toolchain/
|
||||
- script: tar xf /tmp/arm-toolchain.tar.xz -C /tmp/arm-toolchain/ --strip-components=1
|
||||
- script: "/opt/python/cp38-cp38/bin/python -m venv $PWD/env"
|
||||
- script: 'echo "##vso[task.prependpath]$PWD/env/bin"'
|
||||
- script: echo '##vso[task.prependpath]/tmp/arm-toolchain/bin'
|
||||
- script: echo '##vso[task.prependpath]/tmp/arm-toolchain/aarch64-none-linux-gnu/libc/usr/bin'
|
||||
- script: echo $PATH
|
||||
- script: "stat `which aarch64-none-linux-gnu-gcc`"
|
||||
- script: "pip install build git+https://github.com/rhelmot/auditwheel"
|
||||
- script: "cd src/api/python && CC=aarch64-none-linux-gnu-gcc CXX=aarch64-none-linux-gnu-g++ AR=aarch64-none-linux-gnu-ar LD=aarch64-none-linux-gnu-ld python -m build && AUDITWHEEL_PLAT= auditwheel repair --best-plat dist/*.whl && cd ../../.."
|
||||
|
||||
- job: "UbuntuOCaml"
|
||||
displayName: "Ubuntu with OCaml"
|
||||
timeoutInMinutes: 90
|
||||
pool:
|
||||
vmImage: "Ubuntu-latest"
|
||||
steps:
|
||||
- script: sudo apt-get install ocaml opam libgmp-dev
|
||||
- script: opam init -y
|
||||
- script: eval `opam config env`; opam install zarith ocamlfind -y
|
||||
- script: eval `opam config env`; python scripts/mk_make.py --ml
|
||||
- script: |
|
||||
set -e
|
||||
cd build
|
||||
eval `opam config env`
|
||||
make -j3
|
||||
make -j3 examples
|
||||
make -j3 test-z3
|
||||
cd ..
|
||||
- script: eval `opam config env`; ocamlfind install z3 build/api/ml/* -dll build/libz3.*
|
||||
- template: scripts/test-z3.yml
|
||||
- template: scripts/test-regressions.yml
|
||||
- template: scripts/generate-doc.yml
|
||||
|
||||
|
||||
- job: "UbuntuOCamlStatic"
|
||||
displayName: "Ubuntu with OCaml on z3-static"
|
||||
timeoutInMinutes: 90
|
||||
pool:
|
||||
vmImage: "Ubuntu-latest"
|
||||
steps:
|
||||
- script: sudo apt-get install ocaml opam libgmp-dev
|
||||
- script: opam init -y
|
||||
- script: eval `opam config env`; opam install zarith ocamlfind -y
|
||||
- script: eval `opam config env`; python scripts/mk_make.py --ml --staticlib
|
||||
- script: |
|
||||
set -e
|
||||
cd build
|
||||
eval `opam config env`
|
||||
make -j3
|
||||
make -j3 examples
|
||||
make -j3 test-z3
|
||||
cd ..
|
||||
- script: eval `opam config env`; ocamlfind install z3-static build/api/ml/* build/libz3-static.a
|
||||
- script: |
|
||||
set -e
|
||||
cd build
|
||||
eval `opam config env`
|
||||
make -j3
|
||||
make -j3 _ex_ml_example_post_install
|
||||
./ml_example_static.byte
|
||||
./ml_example_static_custom.byte
|
||||
./ml_example_static
|
||||
cd ..
|
||||
- template: scripts/test-z3.yml
|
||||
- template: scripts/test-regressions.yml
|
||||
- template: scripts/generate-doc.yml
|
||||
|
||||
- job: "LinuxMSan"
|
||||
displayName: "Ubuntu build - cmake"
|
||||
timeoutInMinutes: 90
|
||||
condition: eq(0,1)
|
||||
pool:
|
||||
vmImage: "ubuntu-latest"
|
||||
strategy:
|
||||
matrix:
|
||||
msanClang:
|
||||
cmdLine: '$(msanEnv) cmake $(cmakeStdArgs)'
|
||||
runUnitTest: 'True'
|
||||
runExample: 'False' # Examples don't seem to build with MSAN
|
||||
steps:
|
||||
- script: sudo apt-get install ninja-build libc++-dev libc++abi-dev
|
||||
- script: ./scripts/build_libcxx_msan.sh
|
||||
- script: |
|
||||
set -e
|
||||
mkdir build
|
||||
cd build
|
||||
$(cmdLine)
|
||||
ninja
|
||||
ninja test-z3
|
||||
cd ..
|
||||
- script: |
|
||||
cd build
|
||||
export MSAN_SYMBOLIZER_PATH=/usr/lib/llvm-6.0/bin/llvm-symbolizer
|
||||
./test-z3 -a
|
||||
cd ..
|
||||
condition: eq(variables['runUnitTest'], 'True')
|
||||
- ${{if eq(variables['runExample'], 'True')}}:
|
||||
- template: scripts/test-examples-cmake.yml
|
||||
# - template: scripts/test-jupyter.yml
|
||||
# - template: scripts/test-java-cmake.yml
|
||||
# - template: scripts/test-regressions.yml
|
||||
|
||||
- job: "UbuntuCMake"
|
||||
displayName: "Ubuntu build - cmake"
|
||||
timeoutInMinutes: 90
|
||||
pool:
|
||||
vmImage: "ubuntu-latest"
|
||||
strategy:
|
||||
matrix:
|
||||
releaseClang:
|
||||
setupCmd1: ''
|
||||
setupCmd2: ''
|
||||
buildCmd: 'CC=clang CXX=clang++ cmake -DCMAKE_BUILD_TYPE=Release $(cmakeStdArgs)'
|
||||
runTests: 'True'
|
||||
debugClang:
|
||||
setupCmd1: 'julia -e "using Pkg; Pkg.add(PackageSpec(name=\"libcxxwrap_julia_jll\"))"'
|
||||
setupCmd2: 'JlCxxDir=$(julia -e "using libcxxwrap_julia_jll; print(dirname(libcxxwrap_julia_jll.libcxxwrap_julia_path))")'
|
||||
buildCmd: 'CC=clang CXX=clang++ cmake -DJlCxx_DIR=$JlCxxDir/cmake/JlCxx $(cmakeJulia) $(cmakeStdArgs)'
|
||||
runTests: 'True'
|
||||
debugGcc:
|
||||
setupCmd1: ''
|
||||
setupCmd2: ''
|
||||
buildCmd: 'CC=gcc CXX=g++ cmake $(cmakeStdArgs)'
|
||||
runTests: 'True'
|
||||
releaseSTGcc:
|
||||
setupCmd1: ''
|
||||
setupCmd2: ''
|
||||
buildCmd: 'CC=gcc CXX=g++ cmake -DCMAKE_BUILD_TYPE=Release -DZ3_SINGLE_THREADED=ON $(cmakeStdArgs)'
|
||||
runTests: 'True'
|
||||
steps:
|
||||
- script: sudo apt-get install ninja-build
|
||||
- script: |
|
||||
set -e
|
||||
mkdir build
|
||||
cd build
|
||||
$(setupCmd1)
|
||||
$(setupCmd2)
|
||||
$(buildCmd)
|
||||
ninja
|
||||
ninja test-z3
|
||||
cd ..
|
||||
- script: |
|
||||
cd build
|
||||
./test-z3 -a
|
||||
cd ..
|
||||
condition: eq(variables['runTests'], 'True')
|
||||
- ${{if eq(variables['runTests'], 'True')}}:
|
||||
- template: scripts/test-examples-cmake.yml
|
||||
# - template: scripts/test-jupyter.yml
|
||||
# - template: scripts/test-java-cmake.yml
|
||||
- ${{if eq(variables['runTests'], 'True')}}:
|
||||
- template: scripts/test-regressions.yml
|
||||
|
||||
|
||||
|
||||
|
||||
- job: "MacOSPython"
|
||||
displayName: "MacOS build"
|
||||
timeoutInMinutes: 90
|
||||
pool:
|
||||
vmImage: "macOS-latest"
|
||||
steps:
|
||||
- script: python scripts/mk_make.py -d --java --dotnet
|
||||
- script: |
|
||||
set -e
|
||||
cd build
|
||||
make -j3
|
||||
make -j3 examples
|
||||
make -j3 test-z3
|
||||
./cpp_example
|
||||
./c_example
|
||||
# java -cp api/java/classes; JavaExample
|
||||
cd ..
|
||||
# Skip as dead-slow in debug mode:
|
||||
# - template: scripts/test-z3.yml
|
||||
- template: scripts/test-regressions.yml
|
||||
|
||||
|
||||
- job: "MacOSCMake"
|
||||
displayName: "MacOS build with CMake"
|
||||
timeoutInMinutes: 90
|
||||
pool:
|
||||
vmImage: "macOS-latest"
|
||||
steps:
|
||||
- script: brew install ninja
|
||||
- script: brew install --cask julia
|
||||
- script: |
|
||||
julia -e "using Pkg; Pkg.add(PackageSpec(name=\"libcxxwrap_julia_jll\"))"
|
||||
JlCxxDir=$(julia -e "using libcxxwrap_julia_jll; println(joinpath(dirname(libcxxwrap_julia_jll.libcxxwrap_julia_path), \"cmake\", \"JlCxx\"))")
|
||||
set -e
|
||||
mkdir build
|
||||
cd build
|
||||
cmake -DJlCxx_DIR=$JlCxxDir $(cmakeJulia) $(cmakeJava) $(cmakePy) -DZ3_BUILD_DOTNET_BINDINGS=False -G "Ninja" ../
|
||||
ninja
|
||||
ninja test-z3
|
||||
cd ..
|
||||
- template: scripts/test-z3.yml
|
||||
# - template: scripts/test-examples-cmake.yml
|
||||
- template: scripts/test-regressions.yml
|
||||
# - template: scripts/test-java-cmake.yml
|
||||
|
||||
|
||||
- job: "MacOSOCaml"
|
||||
displayName: "MacOS build with OCaml"
|
||||
timeoutInMinutes: 90
|
||||
condition: eq(0,1)
|
||||
pool:
|
||||
vmImage: "macOS-latest"
|
||||
steps:
|
||||
- script: brew install opam
|
||||
- script: opam init -y
|
||||
- script: eval `opam config env`; opam install zarith ocamlfind -y
|
||||
- script: eval `opam config env`; python scripts/mk_make.py --ml
|
||||
- script: |
|
||||
set -e
|
||||
cd build
|
||||
eval `opam config env`
|
||||
make -j3
|
||||
make -j3 examples
|
||||
make -j3 test-z3
|
||||
cd ..
|
||||
- script: eval `opam config env`; ocamlfind install z3 build/api/ml/* -dll build/libz3.*
|
||||
- script: |
|
||||
set -e
|
||||
cd build
|
||||
eval `opam config env`
|
||||
make -j3
|
||||
make -j3 _ex_ml_example_post_install
|
||||
# ./ml_example_shared.byte
|
||||
# ./ml_example_shared_custom.byte
|
||||
# ./ml_example_shared
|
||||
cd ..
|
||||
# Skip as dead-slow in debug mode:
|
||||
# - template: scripts/test-z3.yml
|
||||
- template: scripts/test-regressions.yml
|
||||
BIN  levelwise.pdf  (binary file not shown)
|
|
@ -2804,6 +2804,17 @@ class ArithRef(ExprRef):
|
|||
a, b = _coerce_exprs(self, other)
|
||||
return BoolRef(Z3_mk_ge(self.ctx_ref(), a.as_ast(), b.as_ast()), self.ctx)
|
||||
|
||||
def __abs__(self):
|
||||
"""Return an expression representing `abs(self)`.
|
||||
|
||||
>>> x = Int('x')
|
||||
>>> abs(x)
|
||||
If(x > 0, x, -x)
|
||||
>>> eq(abs(x), Abs(x))
|
||||
True
|
||||
"""
|
||||
return Abs(self)
|
||||
|
||||
|
||||
def is_arith(a):
|
||||
"""Return `True` if `a` is an arithmetical expression.
|
||||
|
|
@ -6849,7 +6860,7 @@ class ModelRef(Z3PPObject):
|
|||
if isinstance(idx, SortRef):
|
||||
return self.get_universe(idx)
|
||||
if z3_debug():
|
||||
_z3_assert(False, "Integer, Z3 declaration, or Z3 constant expected")
|
||||
_z3_assert(False, "Integer, Z3 declaration, or Z3 constant expected. Use model.eval instead for complicated expressions")
|
||||
return None
|
||||
|
||||
def decls(self):
|
||||
|
|
@ -7657,7 +7668,11 @@ class Solver(Z3PPObject):
|
|||
>>> s = Solver()
|
||||
>>> s.add(x > 0)
|
||||
>>> s.add(x < 2)
|
||||
>>> r = s.sexpr()
|
||||
>>> print(s.sexpr())
|
||||
(declare-fun x () Int)
|
||||
(assert (> x 0))
|
||||
(assert (< x 2))
|
||||
|
||||
"""
|
||||
return Z3_solver_to_string(self.ctx.ref(), self.solver)
|
||||
|
||||
|
|
@ -7683,6 +7698,39 @@ class Solver(Z3PPObject):
|
|||
self.ctx.ref(), "benchmark generated from python API", "", "unknown", "", sz1, v, e,
|
||||
)
|
||||
|
||||
def solutions(self, t):
|
||||
"""Returns an iterator over solutions that satisfy the constraints.
|
||||
|
||||
The parameter `t` is an expression whose values should be returned.
|
||||
|
||||
>>> s = Solver()
|
||||
>>> x, y, z = Ints("x y z")
|
||||
>>> s.add(x * x == 4)
|
||||
>>> print(list(s.solutions(x)))
|
||||
[-2, 2]
|
||||
>>> s.reset()
|
||||
>>> s.add(x >= 0, x < 10)
|
||||
>>> print(list(s.solutions(x)))
|
||||
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
|
||||
>>> s.reset()
|
||||
>>> s.add(x >= 0, y < 10, y == 2*x)
|
||||
>>> print(list(s.solutions([x, y])))
|
||||
[[0, 0], [1, 2], [2, 4], [3, 6], [4, 8]]
|
||||
"""
|
||||
s = Solver()
|
||||
s.add(self.assertions())
|
||||
t = _get_args(t)
|
||||
if isinstance(t, (list, tuple)):
|
||||
while s.check() == sat:
|
||||
result = [s.model().eval(t_, model_completion=True) for t_ in t]
|
||||
yield result
|
||||
s.add(*(t_ != result_ for t_, result_ in zip(t, result)))
|
||||
else:
|
||||
while s.check() == sat:
|
||||
result = s.model().eval(t, model_completion=True)
|
||||
yield result
|
||||
s.add(t != result)
|
||||
|
||||
|
||||
def SolverFor(logic, ctx=None, logFile=None):
|
||||
"""Create a solver customized for the given logic.