Skip to content

Instantly share code, notes, and snippets.

@navanchauhan
Last active February 25, 2026 21:43
Show Gist options
  • Select an option

  • Save navanchauhan/19062169a3ab6bd9099e0a3cfae7b56c to your computer and use it in GitHub Desktop.

Select an option

Save navanchauhan/19062169a3ab6bd9099e0a3cfae7b56c to your computer and use it in GitHub Desktop.
attractor dot workflows
// Consensus Task Workflow — Graphviz DOT consumed by the attractor workflow
// engine. LLM nodes carry llm_provider/llm_model/prompt attributes; edges with
// condition="…" route on the node's reported outcome.
// Stages: refine DoD -> three independent DoD drafts -> consolidate -> three
// plans -> debate/merge -> implement -> verify -> three reviews -> consensus
// verdict, with at most one Postmortem-driven replanning loop.
digraph ConsensusTask {
// Graph-level defaults. NOTE(review): retry_target/fallback_retry_target are
// assumed to be engine-wide retry routing (retry via RefineDoD, then Start) —
// confirm against the engine's retry semantics.
graph [
label="Consensus Task Workflow",
goal="Produce a validated multi-model implementation plan and review consensus for the requested task.",
rankdir=LR,
default_fidelity="truncate",
default_max_retry=3,
retry_target="RefineDoD",
fallback_retry_target="Start"
];
// Entry and terminal markers (Mdiamond/Msquare appear to be the engine's
// start/exit node shapes — same convention as the other graphs in this file).
Start [shape=Mdiamond, label="Start"];
Exit [shape=Msquare, label="Exit"];
// Gate node: sharpens the definition-of-done before the drafting fan-out.
RefineDoD [
shape=box,
label="Refine DoD Gate",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Refine the definition-of-done requirements for the current task and identify missing acceptance criteria."
];
// Three independent DoD drafts from different providers (chained sequentially
// in the edge section below, not run in parallel).
DefineDoDGemini [
shape=box,
label="Refine DoD (Gemini)",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="high",
prompt="Propose a complete DoD draft and assumptions for this task."
];
DefineDoDGPT [
shape=box,
label="Refine DoD (GPT-5.2)",
llm_provider="openai",
llm_model="gpt-5.2",
reasoning_effort="high",
prompt="Propose a complete DoD draft with measurable verification criteria."
];
DefineDoDOpus [
shape=box,
label="Refine DoD (Opus)",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Propose a complete DoD draft with risk-based prioritization."
];
// Merge the three drafts into a single authoritative DoD.
ConsolidateDoD [
shape=box,
label="Consolidate DoD",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Merge DoD drafts into a single final DoD."
];
// Three independent implementation plans from the consolidated DoD.
PlanGemini [
shape=box,
label="Plan (Gemini)",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="high",
prompt="Create an implementation plan from the consolidated DoD."
];
PlanGPT [
shape=box,
label="Plan (GPT-5.2)",
llm_provider="openai",
llm_model="gpt-5.2",
reasoning_effort="high",
prompt="Create an implementation plan from the consolidated DoD."
];
PlanOpus [
shape=box,
label="Plan (Opus)",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Create an implementation plan from the consolidated DoD."
];
// Synthesize the three plans into one execution plan.
DebateConsolidate [
shape=box,
label="Debate and Consolidate Plans",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Synthesize the three plans into one final execution plan."
];
// Execution node; allow_partial presumably lets the engine accept partial
// completion rather than hard-failing — confirm engine semantics.
Implement [
shape=box,
label="Implement",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
allow_partial=true,
prompt="Execute the final plan and summarize what was implemented."
];
VerifyOutputs [
shape=box,
label="Verify Outputs",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Verify outputs against DoD and summarize evidence."
];
// Three independent reviews of the implementation + verification evidence.
ReviewGemini [
shape=box,
label="Review (Gemini)",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="high",
prompt="Review implementation and verification evidence."
];
ReviewGPT [
shape=box,
label="Review (GPT-5.2)",
llm_provider="openai",
llm_model="gpt-5.2",
reasoning_effort="high",
prompt="Review implementation and verification evidence."
];
ReviewOpus [
shape=box,
label="Review (Opus)",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Review implementation and verification evidence."
];
// Verdict node: node-level max_retries/retry_target override the graph default
// so a retry verdict re-enters at Implement rather than RefineDoD.
ReviewConsensus [
shape=box,
label="Review Consensus",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
max_retries=1,
retry_target="Implement",
prompt="Produce final consensus verdict. Use success when ready to exit, retry when rework is required, fail when blocked."
];
Postmortem [
shape=box,
label="Postmortem",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Write a postmortem describing why consensus failed and what should change in the next loop."
];
// Main linear pipeline: DoD drafting, consolidation, planning, implementation.
Start -> RefineDoD;
RefineDoD -> DefineDoDGemini;
DefineDoDGemini -> DefineDoDGPT;
DefineDoDGPT -> DefineDoDOpus;
DefineDoDOpus -> ConsolidateDoD;
ConsolidateDoD -> PlanGemini;
PlanGemini -> PlanGPT;
PlanGPT -> PlanOpus;
PlanOpus -> DebateConsolidate;
DebateConsolidate -> Implement;
Implement -> VerifyOutputs;
VerifyOutputs -> ReviewGemini;
ReviewGemini -> ReviewGPT;
ReviewGPT -> ReviewOpus;
ReviewOpus -> ReviewConsensus;
// Verdict routing. NOTE(review): the final unconditional edge is presumably
// the fallback taken when neither condition matches (i.e. outcome=fail exits
// directly without a postmortem) — confirm the engine prefers conditional
// edges over the unconditional one.
ReviewConsensus -> Exit [condition="outcome=success", label="pass"];
ReviewConsensus -> Postmortem [condition="outcome=retry", label="retry"];
ReviewConsensus -> Exit;
// At most one replanning loop: loop_restart_count gates a single restart back
// to the planning fan-out; a second failure exits the workflow.
Postmortem -> PlanGemini [condition="context.internal.loop_restart_count=0", loop_restart=true, label="retry_once"];
Postmortem -> Exit [condition="context.internal.loop_restart_count!=0", label="stop_after_retry"];
}
// Megaplan — sprint-planning workflow: bootstrap the .ai workspace, orient on
// repo/ledger/recent sprints, draft intents and full drafts with three models,
// optionally interview the operator, revise, critique, merge, write the final
// sprint, and append a ledger row. parallelogram nodes run shell via
// tool_command; box nodes are LLM calls; the hexagon is a decision gate.
digraph Megaplan {
graph [
goal="Create a high-quality sprint plan using multi-model orientation, drafting, critique, and merge stages.",
rankdir=LR,
default_max_retry=2,
default_fidelity="truncate"
];
Start [shape=Mdiamond, label="Start"];
Exit [shape=Msquare, label="Exit"];
// Idempotent bootstrap: creates .ai/drafts and .ai/sprints, seeds the TSV
// ledger header on first run, and emits 'ready' on stdout.
SetupEnvironment [
shape=parallelogram,
label="Setup .ai workspace",
tool_command="set -eu\nmkdir -p .ai/drafts .ai/sprints\nif [ ! -f .ai/ledger.tsv ]; then\n printf 'sprint_id\\ttitle\\tstatus\\tcreated_at\\tupdated_at\\n' > .ai/ledger.tsv\nfi\nprintf 'ready'"
];
// Orientation fan-out: three summaries of conventions, ledger state, and
// recent sprint artifacts (run sequentially via the edges below).
OrientConventions [
shape=box,
label="Orient: Conventions (Gemini)",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="high",
prompt="Review repository conventions and summarize coding/testing/release expectations for sprint planning."
];
OrientLedger [
shape=box,
label="Orient: Ledger Status (Gemini)",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="high",
prompt="Review .ai/ledger.tsv and summarize recent sprint status, throughput, and open work."
];
OrientRecent [
shape=box,
label="Orient: Recent Sprints (Claude)",
llm_provider="anthropic",
llm_model="claude-sonnet-4-6",
reasoning_effort="high",
prompt="Inspect recent sprint artifacts under .ai/sprints and .ai/drafts; summarize patterns, risks, and carryover work."
];
SynthesizeOrientation [
shape=box,
label="Synthesize Orientation",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Synthesize orientation outputs into a concise planning brief and write key assumptions."
];
// Intent drafting: three independent sprint intents with different emphases.
IntentClaude [
shape=box,
label="Intent (Claude)",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Draft an intent document for the next sprint: scope, constraints, DoD, and execution strategy."
];
IntentCodex [
shape=box,
label="Intent (Codex/GPT-5.2)",
llm_provider="openai",
llm_model="gpt-5.2",
reasoning_effort="high",
prompt="Draft an independent sprint intent from an implementation-first perspective."
];
IntentGemini [
shape=box,
label="Intent (Gemini)",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="high",
prompt="Draft an independent sprint intent emphasizing validation and delivery cadence."
];
// Full-draft stage: three independent sprint drafts from intents + brief.
DraftClaude [
shape=box,
label="Initial Draft (Claude)",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Create a full sprint draft using the intent documents and orientation brief."
];
DraftCodex [
shape=box,
label="Initial Draft (Codex/GPT-5.2)",
llm_provider="openai",
llm_model="gpt-5.2",
reasoning_effort="high",
prompt="Create an independent full sprint draft, including implementation and validation details."
];
DraftGemini [
shape=box,
label="Initial Draft (Gemini)",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="high",
prompt="Create an independent full sprint draft focused on risk control and delivery speed."
];
GenerateQuestions [
shape=box,
label="Generate Interview Questions",
llm_provider="anthropic",
llm_model="claude-sonnet-4-6",
reasoning_effort="high",
prompt="Generate concise interview questions to resolve conflicts across the three initial drafts."
];
// Decision gate. NOTE(review): hexagon presumably renders an interactive
// choice whose [A]/[S] edge labels below are the selectable options — confirm
// engine behavior for hexagon nodes.
InterviewGate [
shape=hexagon,
label="Interview Decision",
prompt="Choose whether to answer interview questions now or skip directly to revision."
];
CaptureInterview [
shape=box,
label="Capture Interview Answers",
llm_provider="anthropic",
llm_model="claude-sonnet-4-6",
reasoning_effort="high",
prompt="Record interview answers and summarize planning deltas that must be applied during revision."
];
// Low-effort bookkeeping path when the interview is skipped.
SkipInterview [
shape=box,
label="Skip Interview",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="low",
prompt="Document that interview was skipped and list assumptions to keep revision deterministic."
];
// Revision stage: the same three models revise with interview/assumptions.
ReviseClaude [
shape=box,
label="Revised Draft (Claude)",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Revise sprint draft with interview/assumption inputs and improve execution clarity."
];
ReviseCodex [
shape=box,
label="Revised Draft (Codex/GPT-5.2)",
llm_provider="openai",
llm_model="gpt-5.2",
reasoning_effort="high",
prompt="Revise sprint draft with interview/assumption inputs and strengthen implementation quality."
];
ReviseGemini [
shape=box,
label="Revised Draft (Gemini)",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="high",
prompt="Revise sprint draft with interview/assumption inputs and strengthen validation strategy."
];
CrossModelCritiques [
shape=box,
label="Cross-Model Critiques",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Synthesize cross-model critiques and identify required edits before merge."
];
MergeAnalysis [
shape=box,
label="Merge Analysis",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Compare revised drafts and critique synthesis; select best architecture, milestones, and DoD language."
];
WriteFinalSprint [
shape=box,
label="Write Final Sprint",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Write final sprint plan ready for execution, including explicit DoD and verification checklist."
];
// Appends a new zero-padded sprint row to the ledger. The lexicographic sort
// works because ids are zero-padded to 3 digits (until 999). NOTE(review):
// $((10#$sprint_id + 1)) uses the base-10 prefix to avoid octal parsing of
// zero-padded ids, but "10#" is a bash/ksh arithmetic extension, not POSIX sh
// — confirm which shell executes tool_command (dash would reject it).
SyncLedger [
shape=parallelogram,
label="Sync Ledger",
tool_command="set -eu\nsprint_id=$(awk -F '\\t' 'NR>1{print $1}' .ai/ledger.tsv | sort | tail -n1)\nif [ -z \"$sprint_id\" ]; then\n next=001\nelse\n next=$(printf '%03d' $((10#$sprint_id + 1)))\nfi\nnow=$(date -u +%Y-%m-%dT%H:%M:%SZ)\nprintf '%s\\t%s\\t%s\\t%s\\t%s\\n' \"$next\" \"Generated Sprint $next\" \"planned\" \"$now\" \"$now\" >> .ai/ledger.tsv\nprintf 'synced-%s' \"$next\""
];
// Linear pipeline with one branch at the interview gate; both branches rejoin
// at ReviseClaude.
Start -> SetupEnvironment;
SetupEnvironment -> OrientConventions;
OrientConventions -> OrientLedger;
OrientLedger -> OrientRecent;
OrientRecent -> SynthesizeOrientation;
SynthesizeOrientation -> IntentClaude;
IntentClaude -> IntentCodex;
IntentCodex -> IntentGemini;
IntentGemini -> DraftClaude;
DraftClaude -> DraftCodex;
DraftCodex -> DraftGemini;
DraftGemini -> GenerateQuestions;
GenerateQuestions -> InterviewGate;
InterviewGate -> CaptureInterview [label="[A] Answer interview"];
InterviewGate -> SkipInterview [label="[S] Skip interview"];
CaptureInterview -> ReviseClaude;
SkipInterview -> ReviseClaude;
ReviseClaude -> ReviseCodex;
ReviseCodex -> ReviseGemini;
ReviseGemini -> CrossModelCritiques;
CrossModelCritiques -> MergeAnalysis;
MergeAnalysis -> WriteFinalSprint;
WriteFinalSprint -> SyncLedger;
SyncLedger -> Exit;
}
// Semport — processes one semantic-port ledger row per run: pick the next
// "new" row, let an LLM decide port vs acknowledge, implement + validate the
// port, and record the final disposition back into semport/ledger.tsv.
// parallelogram nodes run shell via tool_command; box nodes are LLM calls.
digraph Semport {
graph [
goal="Process one semantic-port ledger row at a time and record implemented vs acknowledged status.",
rankdir=LR,
default_max_retry=2
];
Start [shape=Mdiamond, label="Start"];
Exit [shape=Msquare, label="Exit"];
// Idempotent bootstrap: creates work dirs and seeds the TSV ledger header.
InitLedger [
shape=parallelogram,
label="Init semport ledger",
tool_command="set -eu\nmkdir -p .ai/semport semport\nif [ ! -f semport/ledger.tsv ]; then\n printf 'shortsha\\tiso8601\\tdisposition\\n' > semport/ledger.tsv\nfi"
];
// Seeds a demo row when no 'new' rows exist, then stages the first 'new' row
// into .ai/semport/current_commit.tsv; emits 'process' or 'done' for routing.
SyncAndPick [
shape=parallelogram,
label="Pick next new row",
tool_command="set -eu\nif ! awk -F '\\t' 'NR>1 && $3==\"new\"{found=1} END{exit found?0:1}' semport/ledger.tsv; then\n printf 'demo001\\t%s\\tnew\\n' \"$(date -u +%Y-%m-%dT%H:%M:%SZ)\" >> semport/ledger.tsv\nfi\nnext_row=$(awk -F '\\t' 'NR>1 && $3==\"new\"{print; exit}' semport/ledger.tsv || true)\nif [ -n \"$next_row\" ]; then\n printf '%s\\n' \"$next_row\" > .ai/semport/current_commit.tsv\n printf 'process'\nelse\n printf 'done'\nfi"
];
// Decision node: sets preferred_next_label to "port" in its status block when
// the row should be ported. NOTE(review): the "[1m]" suffix on the model id
// looks like a context-window tag; other graphs use plain "claude-sonnet-4-6"
// — confirm the provider accepts this form.
AnalyzePlan [
shape=box,
label="Analyze change (Sonnet)",
llm_provider="anthropic",
llm_model="claude-sonnet-4-6[1m]",
reasoning_effort="high",
prompt="Read .ai/semport/current_commit.tsv and decide whether to port or acknowledge the item. If it should be ported, set preferred_next_label to port in your status block; otherwise leave it empty and summarize why."
];
ImplementPort [
shape=box,
label="Implement port (GPT-5.2)",
llm_provider="openai",
llm_model="gpt-5.2",
reasoning_effort="high",
prompt="Implement the port for the selected row and summarize changes."
];
// Build gate: dumps the build log and fails the node if the build breaks.
ValidatePort [
shape=parallelogram,
label="Validate build",
tool_command="set -eu\nswift build --target OmniAIAttractor >/tmp/semport-build.log 2>&1 || { cat /tmp/semport-build.log; exit 1; }\nprintf 'validated'"
];
// Rewrites the staged row's disposition column to 'acknowledged' via a
// temp-file swap (header row is passed through untouched).
MarkAcknowledged [
shape=parallelogram,
label="Mark acknowledged",
tool_command="set -eu\nsha=$(awk -F '\\t' 'NR==1{print $1}' .ai/semport/current_commit.tsv)\nawk -F '\\t' -v OFS='\\t' -v sha=\"$sha\" '\nNR==1 {print; next}\n{ if ($1==sha) { $3=\"acknowledged\" } ; print }\n' semport/ledger.tsv > semport/ledger.tsv.tmp\nmv semport/ledger.tsv.tmp semport/ledger.tsv\nprintf 'acknowledged'"
];
// Same temp-file swap, marking the row 'implemented'.
MarkImplemented [
shape=parallelogram,
label="Mark implemented",
tool_command="set -eu\nsha=$(awk -F '\\t' 'NR==1{print $1}' .ai/semport/current_commit.tsv)\nawk -F '\\t' -v OFS='\\t' -v sha=\"$sha\" '\nNR==1 {print; next}\n{ if ($1==sha) { $3=\"implemented\" } ; print }\n' semport/ledger.tsv > semport/ledger.tsv.tmp\nmv semport/ledger.tsv.tmp semport/ledger.tsv\nprintf 'implemented'"
];
Start -> InitLedger;
InitLedger -> SyncAndPick;
SyncAndPick -> AnalyzePlan [label="process", condition="context.tool_stdout=process"];
SyncAndPick -> Exit [label="done", condition="context.tool_stdout=done"];
// FIX: the condition key now matches the field the AnalyzePlan prompt asks the
// model to set ("preferred_next_label"); it previously read "preferred_label",
// which would never match, so the unconditional "skip" edge always won and the
// port branch was unreachable. Confirm the engine's status-field name.
AnalyzePlan -> ImplementPort [label="port", condition="preferred_next_label=port"];
AnalyzePlan -> MarkAcknowledged [label="skip"];
ImplementPort -> ValidatePort;
ValidatePort -> MarkImplemented [label="pass", condition="outcome=success"];
// A failed build downgrades the row to 'acknowledged' rather than aborting.
ValidatePort -> MarkAcknowledged [label="fail", condition="outcome=fail"];
MarkAcknowledged -> Exit;
MarkImplemented -> Exit;
}
// SprintExec — executes the next incomplete sprint from .ai/ledger.tsv:
// locate + read the sprint, mark it in_progress, implement, validate the
// build, prepare a commit plan, run a three-model review, then either mark
// completed, retry implementation, or write a failure summary.
digraph SprintExec {
// Graph-level retry routing sends failed nodes back to ImplementSprint.
graph [
goal="Execute the next incomplete sprint from .ai/ledger.tsv through implementation, validation, review, and completion.",
rankdir=LR,
default_max_retry=2,
retry_target="ImplementSprint"
];
Start [shape=Mdiamond, label="Start"];
Exit [shape=Msquare, label="Exit"];
FindNextSprint [
shape=box,
label="Find Next Sprint",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="high",
prompt="Identify the next non-completed sprint in .ai/ledger.tsv and summarize the target sprint."
];
ReadSprint [
shape=box,
label="Read Sprint",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="high",
prompt="Read the target sprint document and summarize requirements, DoD items, and expected artifacts."
];
// Bootstraps the ledger if missing, picks the first non-completed row (or the
// last row as fallback), flips it to in_progress via a temp-file swap, and
// stashes the id in .ai/current_sprint_id.txt for CompleteSprint.
// NOTE(review): if the ledger exists with only the header row, the awk END
// fallback sets target to the literal header field "sprint_id"; the update
// loop then matches nothing, but current_sprint_id.txt holds a bogus id —
// confirm this state cannot occur in practice.
MarkInProgress [
shape=parallelogram,
label="Mark In Progress",
tool_command="set -eu\nmkdir -p .ai\nif [ ! -f .ai/ledger.tsv ]; then\n printf 'sprint_id\\ttitle\\tstatus\\tcreated_at\\tupdated_at\\n001\\tBootstrap sprint\\tplanned\\t%s\\t%s\\n' \"$(date -u +%Y-%m-%dT%H:%M:%SZ)\" \"$(date -u +%Y-%m-%dT%H:%M:%SZ)\" > .ai/ledger.tsv\nfi\ntarget=$(awk -F '\\t' 'NR>1 && $3!=\"completed\"{print $1; exit}' .ai/ledger.tsv)\nif [ -z \"$target\" ]; then\n target=$(awk -F '\\t' 'END{print $1}' .ai/ledger.tsv)\nfi\nnow=$(date -u +%Y-%m-%dT%H:%M:%SZ)\nawk -F '\\t' -v OFS='\\t' -v target=\"$target\" -v now=\"$now\" '\nNR==1 {print; next}\n{ if ($1==target) { $3=\"in_progress\"; $5=now }; print }\n' .ai/ledger.tsv > .ai/ledger.tsv.tmp\nmv .ai/ledger.tsv.tmp .ai/ledger.tsv\nprintf '%s' \"$target\" > .ai/current_sprint_id.txt\nprintf 'in_progress-%s' \"$target\""
];
// NOTE(review): node-level retry_target points at itself, which the graph
// default already does — presumably only max_retries=1 is the intended
// override of default_max_retry=2.
ImplementSprint [
shape=box,
label="Implement Sprint",
llm_provider="anthropic",
llm_model="claude-sonnet-4-6",
reasoning_effort="high",
max_retries=1,
retry_target="ImplementSprint",
prompt="Implement sprint requirements and produce a concrete execution summary with changes and risks."
];
// Build gate: dumps the log and fails the node when the swift build breaks.
ValidateBuild [
shape=parallelogram,
label="Validate Build and Tests",
tool_command="set -eu\nswift build --target OmniAIAttractor >/tmp/sprint-exec-build.log 2>&1 || { cat /tmp/sprint-exec-build.log; exit 1; }\nprintf 'validation-pass'"
];
PrepareCommit [
shape=box,
label="Prepare Commit Plan",
llm_provider="openai",
llm_model="gpt-5.2",
reasoning_effort="high",
prompt="Prepare a commit plan and changelog summary for sprint work without creating a commit."
];
// Three-model review fan (run sequentially via the edges below).
ReviewClaude [
shape=box,
label="Review (Claude)",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
prompt="Review the sprint implementation for correctness against DoD and testing evidence."
];
ReviewCodex [
shape=box,
label="Review (Codex/GPT-5.2)",
llm_provider="openai",
llm_model="gpt-5.2",
reasoning_effort="high",
prompt="Review the sprint implementation from a code-quality and regression-risk perspective."
];
ReviewGemini [
shape=box,
label="Review (Gemini)",
llm_provider="gemini",
llm_model="gemini-3-flash-preview",
reasoning_effort="high",
prompt="Review sprint implementation and validation completeness."
];
// Gate node (goal_gate=true) producing the success/retry/fail verdict that
// drives the three conditional edges below; allow_partial tolerates partial
// completion — confirm engine semantics for both flags.
ReviewAnalysis [
shape=box,
label="Review Analysis",
llm_provider="anthropic",
llm_model="claude-opus-4-6",
reasoning_effort="high",
goal_gate=true,
max_retries=1,
retry_target="ImplementSprint",
allow_partial=true,
prompt="Synthesize review findings and return success if sprint is complete, retry if fixes are needed, fail if blocked."
];
// Flips the current sprint's ledger row to 'completed' with a fresh timestamp.
CompleteSprint [
shape=parallelogram,
label="Complete Sprint",
tool_command="set -eu\ntarget=$(cat .ai/current_sprint_id.txt)\nnow=$(date -u +%Y-%m-%dT%H:%M:%SZ)\nawk -F '\\t' -v OFS='\\t' -v target=\"$target\" -v now=\"$now\" '\nNR==1 {print; next}\n{ if ($1==target) { $3=\"completed\"; $5=now }; print }\n' .ai/ledger.tsv > .ai/ledger.tsv.tmp\nmv .ai/ledger.tsv.tmp .ai/ledger.tsv\nprintf 'completed-%s' \"$target\""
];
FailureSummary [
shape=box,
label="Failure Summary",
llm_provider="anthropic",
llm_model="claude-sonnet-4-6",
reasoning_effort="high",
prompt="Summarize why sprint execution failed and what must be fixed before rerun."
];
Start -> FindNextSprint;
FindNextSprint -> ReadSprint;
ReadSprint -> MarkInProgress;
MarkInProgress -> ImplementSprint;
ImplementSprint -> ValidateBuild;
ValidateBuild -> PrepareCommit [condition="outcome=success", label="pass"];
ValidateBuild -> FailureSummary [condition="outcome=fail", label="fail"];
PrepareCommit -> ReviewClaude;
ReviewClaude -> ReviewCodex;
ReviewCodex -> ReviewGemini;
ReviewGemini -> ReviewAnalysis;
// Verdict routing: complete, loop back to implementation, or summarize failure.
ReviewAnalysis -> CompleteSprint [condition="outcome=success", label="pass"];
ReviewAnalysis -> ImplementSprint [condition="outcome=retry", label="retry"];
ReviewAnalysis -> FailureSummary [condition="outcome=fail", label="fail"];
CompleteSprint -> Exit;
FailureSummary -> Exit;
}
// VulnerabilityAnalyzer — fully deterministic (no LLM nodes): clones a
// known-vulnerable app (DVWA), pattern-scans it for classic PHP vulnerability
// sinks, and writes a report; finding zero matches in a known-vulnerable
// target is treated as a scanner failure.
digraph VulnerabilityAnalyzer {
graph [
goal="Run a deterministic static vulnerability scan against a known vulnerable application and emit a report with evidence.",
rankdir=LR,
default_max_retry=1
];
Start [shape=Mdiamond, label="Start"];
Exit [shape=Msquare, label="Exit"];
// Idempotent shallow clone of DVWA (skipped when .git already exists);
// records the checkout path for the later stages. Requires network access.
CloneTarget [
shape=parallelogram,
label="Clone vulnerable target",
tool_command="set -eu\nmkdir -p .ai/vuln\ntarget_dir=.ai/vuln/target\nif [ ! -d \"$target_dir/.git\" ]; then\n git clone --depth 1 https://github.com/digininja/DVWA.git \"$target_dir\"\nfi\nprintf '%s\\n' \"$target_dir\" > .ai/vuln/target_path.txt\nprintf 'ready'"
];
// Greps for raw SQL calls, superglobal reads, and shell/eval sinks, preferring
// ripgrep and falling back to grep -RInE; '|| true' keeps set -e alive when a
// tool finds no matches. Output: two comment lines then one match per line.
StaticScan [
shape=parallelogram,
label="Run static scan",
tool_command="set -eu\ntarget_dir=$(cat .ai/vuln/target_path.txt)\nreport=.ai/vuln/static_findings.txt\n{\n echo '# Static findings (pattern scan)';\n echo '# target='\"$target_dir\";\n if command -v rg >/dev/null 2>&1; then\n rg -n --hidden -g '!vendor' -g '!*.min.*' 'mysql_query\\(|mysqli_query\\(|\\$_GET\\[|\\$_POST\\[|eval\\(|exec\\(|shell_exec\\(' \"$target_dir\" || true\n else\n grep -RInE 'mysql_query\\(|mysqli_query\\(|\\$_GET\\[|\\$_POST\\[|eval\\(|exec\\(|shell_exec\\(' \"$target_dir\" || true\n fi\n} > \"$report\"\nprintf 'scanned'"
];
// Counts match lines (tail -n +3 skips the two header lines). grep -c prints
// 0 and exits 1 when nothing matches; '|| true' absorbs that exit status while
// the command substitution still captures the '0'. Emits has_findings /
// no_findings for edge routing.
EvaluateFindings [
shape=parallelogram,
label="Evaluate finding count",
tool_command="set -eu\ncount=$(tail -n +3 .ai/vuln/static_findings.txt | grep -c ':' || true)\nprintf '%s\\n' \"$count\" > .ai/vuln/finding_count.txt\nif [ \"$count\" -gt 0 ]; then\n printf 'has_findings'\nelse\n printf 'no_findings'\nfi"
];
// Writes the markdown report with the count and the first 50 evidence lines.
WriteReport [
shape=parallelogram,
label="Write vulnerability report",
tool_command="set -eu\ncount=$(cat .ai/vuln/finding_count.txt)\n{\n echo '# Vulnerability Analyzer Report';\n echo;\n echo '- Verdict: FAIL (target is vulnerable)';\n echo '- Finding count: '\"$count\";\n echo;\n echo '## Evidence (first 50 matches)';\n tail -n +3 .ai/vuln/static_findings.txt | head -n 50;\n} > .ai/vuln/report.md\nprintf 'report_written'"
];
// Fail-safe: zero findings against DVWA means the scan itself is broken, so
// the node writes an INCONCLUSIVE report and exits 1 on purpose to fail.
NoFindingsFail [
shape=parallelogram,
label="No findings fail-safe",
tool_command="set -eu\ncat > .ai/vuln/report.md <<'EOF'\n# Vulnerability Analyzer Report\n\n- Verdict: INCONCLUSIVE/FAIL\n- Reason: Zero findings were detected in a known-vulnerable target.\nEOF\nprintf 'unexpected_no_findings'\nexit 1"
];
Start -> CloneTarget;
CloneTarget -> StaticScan;
StaticScan -> EvaluateFindings;
// Route on the stdout token emitted by EvaluateFindings.
EvaluateFindings -> WriteReport [label="has_findings", condition="context.tool_stdout=has_findings"];
EvaluateFindings -> NoFindingsFail [label="no_findings", condition="context.tool_stdout=no_findings"];
WriteReport -> Exit;
NoFindingsFail -> Exit;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment