name: CI

on:
  push:
    branches:
      - main
  pull_request:
  workflow_dispatch:
    inputs:
      full:
        description: "Run all jobs (overrides lane toggles)"
        type: boolean
        default: false
      lint_only:
        description: "Run only lint"
        type: boolean
        default: false
      faults_only:
        description: "Run lint + faults, skip full pytest"
        type: boolean
        default: false
      with_coverage:
        description: "Generate optional coverage XML artifact in tests job"
        type: boolean
        default: false

# Least privilege for GITHUB_TOKEN: every job here only checks out code and
# uploads artifacts (artifact upload does not need `contents: write`).
permissions:
  contents: read

# One in-flight run per PR (or per ref for pushes); newer runs cancel older ones.
concurrency:
  group: ci-${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

jobs:
  lint:
    # Fast static-analysis lane. Always runs (not gated by the `changes` job)
    # so every push/PR gets at least a lint signal.
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

      - name: Set up Python
        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          # Quoted to avoid the YAML float trap (3.10-style versions).
          python-version: "3.12"

      - name: Install uv
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7.3.1

      - name: Sync dependencies
        # `--extra dev` pulls in the dev dependency group (ruff lives there).
        run: uv sync --extra dev

      - name: Ruff
        run: uv run ruff check src tests

  changes:
    # Change-detection gate: inspects the event and the diffed paths, then
    # publishes boolean outputs that downstream jobs use in their `if:` guards.
    runs-on: ubuntu-latest
    timeout-minutes: 10
    outputs:
      docs_only: ${{ steps.scope.outputs.docs_only }}
      changed_services: ${{ steps.scope.outputs.changed_services }}
      changed_tests: ${{ steps.scope.outputs.changed_tests }}
      changed_docs: ${{ steps.scope.outputs.changed_docs }}
      changed_workflows: ${{ steps.scope.outputs.changed_workflows }}
      run_tests: ${{ steps.scope.outputs.run_tests }}
      run_faults: ${{ steps.scope.outputs.run_faults }}
      run_eval_gate: ${{ steps.scope.outputs.run_eval_gate }}
    steps:
      - name: Checkout
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          # Full history so `git diff base head` works for arbitrary SHAs.
          fetch-depth: 0

      - name: Compute changed scope
        id: scope
        shell: bash
        # Expression values are passed via env rather than interpolated with
        # ${{ }} inside the script body — script-injection hardening per
        # GitHub's security guidance (the shell never sees expanded template
        # text). Boolean-typed dispatch inputs arrive as "true"/"false"
        # strings, or empty on non-dispatch events.
        env:
          EVENT_NAME: ${{ github.event_name }}
          INPUT_FULL: ${{ github.event.inputs.full }}
          INPUT_LINT_ONLY: ${{ github.event.inputs.lint_only }}
          INPUT_FAULTS_ONLY: ${{ github.event.inputs.faults_only }}
          PUSH_BASE: ${{ github.event.before }}
          PUSH_HEAD: ${{ github.sha }}
          PR_BASE: ${{ github.event.pull_request.base.sha }}
          PR_HEAD: ${{ github.event.pull_request.head.sha }}
        run: |
          set -euo pipefail

          if [[ "$EVENT_NAME" == "workflow_dispatch" ]]; then
            # Manual runs: lane toggles decide; path detection is skipped and
            # all changed_* outputs are reported as true.
            run_tests=true
            run_faults=true
            run_eval_gate=true
            if [[ "$INPUT_FULL" == "true" ]]; then
              : # full run overrides the other toggles; keep all lanes on
            elif [[ "$INPUT_LINT_ONLY" == "true" ]]; then
              run_tests=false
              run_faults=false
              run_eval_gate=false
            elif [[ "$INPUT_FAULTS_ONLY" == "true" ]]; then
              run_tests=false
              run_faults=true
              run_eval_gate=false
            fi

            {
              echo "docs_only=false"
              echo "changed_services=true"
              echo "changed_tests=true"
              echo "changed_docs=true"
              echo "changed_workflows=true"
              echo "run_tests=${run_tests}"
              echo "run_faults=${run_faults}"
              echo "run_eval_gate=${run_eval_gate}"
            } >> "$GITHUB_OUTPUT"
            exit 0
          fi

          if [[ "$EVENT_NAME" == "push" ]]; then
            base="$PUSH_BASE"
            head="$PUSH_HEAD"
          else
            base="$PR_BASE"
            head="$PR_HEAD"
          fi

          # `before` is all-zeros on branch creation / force-push; the diff
          # then fails and we fall into the fail-safe below.
          changed="$(git diff --name-only "$base" "$head" 2>/dev/null || true)"
          if [[ -z "$changed" ]]; then
            # Fail-safe: run all when change detection is uncertain.
            {
              echo "docs_only=false"
              echo "changed_services=true"
              echo "changed_tests=true"
              echo "changed_docs=true"
              echo "changed_workflows=true"
              echo "run_tests=true"
              echo "run_faults=true"
              echo "run_eval_gate=true"
            } >> "$GITHUB_OUTPUT"
            exit 0
          fi

          docs_only=true
          changed_services=false
          changed_tests=false
          changed_docs=false
          changed_workflows=false
          changed_fault_inputs=false
          changed_eval_inputs=false

          # Classify each changed path. A path may match several categories;
          # anything outside docs/markdown clears docs_only.
          while IFS= read -r path; do
            [[ -z "$path" ]] && continue
            case "$path" in
              docs/*|*.md|*.mdx)
                changed_docs=true
                ;;
              *)
                docs_only=false
                ;;
            esac

            case "$path" in
              src/*)
                changed_services=true
                changed_fault_inputs=true
                changed_eval_inputs=true
                ;;
            esac

            case "$path" in
              tests/*)
                changed_tests=true
                changed_fault_inputs=true
                changed_eval_inputs=true
                ;;
            esac

            case "$path" in
              .github/workflows/*)
                changed_workflows=true
                changed_fault_inputs=true
                changed_eval_inputs=true
                ;;
            esac

            case "$path" in
              Makefile|scripts/test_faults.sh)
                changed_fault_inputs=true
                ;;
            esac

            case "$path" in
              docs/evals/*|scripts/run_eval_dataset.py|scripts/run_router_policy_eval.py|scripts/run_runtime_outcome_gate.py|scripts/run_memory_quality_eval.py)
                changed_eval_inputs=true
                ;;
            esac
          done <<< "$changed"

          # Lane decisions: tests/faults default on, eval gate default off;
          # docs-only changes skip everything.
          run_tests=true
          run_faults=true
          run_eval_gate=false
          if [[ "$docs_only" == "true" ]]; then
            run_tests=false
            run_faults=false
            run_eval_gate=false
          fi
          if [[ "$changed_fault_inputs" != "true" ]]; then
            run_faults=false
          fi
          if [[ "$changed_eval_inputs" == "true" ]]; then
            run_eval_gate=true
          fi

          {
            echo "docs_only=${docs_only}"
            echo "changed_services=${changed_services}"
            echo "changed_tests=${changed_tests}"
            echo "changed_docs=${changed_docs}"
            echo "changed_workflows=${changed_workflows}"
            echo "run_tests=${run_tests}"
            echo "run_faults=${run_faults}"
            echo "run_eval_gate=${run_eval_gate}"
          } >> "$GITHUB_OUTPUT"

  tests:
    # Full pytest lane; gated by the change-detection job.
    needs: [changes]
    if: needs.changes.outputs.run_tests == 'true'
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

      - name: Set up Python
        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          python-version: "3.12"

      - name: Install uv
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7.3.1

      - name: Sync dependencies
        run: uv sync --extra dev

      - name: Pytest
        run: |
          mkdir -p .artifacts/pytest
          uv run pytest -q --junitxml=.artifacts/pytest/junit.xml

      - name: Summarize slowest tests
        # Runs even when pytest failed so the summary still appears.
        # Uses the runner's system python3 (guaranteed command on ubuntu
        # runners, unlike the `python` shim), stdlib only — no uv env needed.
        if: always()
        run: |
          python3 - <<'PY'
          from __future__ import annotations

          import os
          from pathlib import Path
          import xml.etree.ElementTree as ET

          junit_path = Path(".artifacts/pytest/junit.xml")
          summary_path = Path(os.environ["GITHUB_STEP_SUMMARY"])

          lines = ["### Slowest tests", ""]
          if not junit_path.exists():
            lines.append("No junit report found.")
          else:
            root = ET.parse(junit_path).getroot()
            rows: list[tuple[float, str]] = []
            for testcase in root.iter("testcase"):
              try:
                seconds = float(testcase.attrib.get("time", "0") or "0")
              except ValueError:
                seconds = 0.0
              classname = testcase.attrib.get("classname", "").strip()
              name = testcase.attrib.get("name", "").strip()
              label = f"{classname}::{name}" if classname else (name or "<unnamed>")
              rows.append((seconds, label))

            lines.extend(["| Duration (s) | Test |", "| ---: | --- |"])
            for seconds, label in sorted(rows, key=lambda row: row[0], reverse=True)[:10]:
              safe_label = label.replace("|", "\\|")
              lines.append(f"| {seconds:.3f} | `{safe_label}` |")
            if not rows:
              lines.append("| 0.000 | No test cases found |")

          with summary_path.open("a", encoding="utf-8") as handle:
            handle.write("\n".join(lines) + "\n")
          PY

      - name: Upload pytest junit
        if: always()
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: pytest-junit-${{ github.run_id }}
          path: .artifacts/pytest/junit.xml
          if-no-files-found: warn

      - name: Optional coverage XML
        # Opt-in via the with_coverage dispatch input; best-effort only.
        if: github.event_name == 'workflow_dispatch' && github.event.inputs.with_coverage == 'true'
        continue-on-error: true
        run: |
          mkdir -p .artifacts/pytest
          uv pip install pytest-cov
          uv run pytest -q --cov=jarvis --cov-report=xml:.artifacts/pytest/coverage.xml

      - name: Upload optional coverage XML
        if: always() && github.event_name == 'workflow_dispatch' && github.event.inputs.with_coverage == 'true'
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: pytest-coverage-${{ github.run_id }}
          path: .artifacts/pytest/coverage.xml
          if-no-files-found: warn

  eval-gate:
    # Quality-gate lane; only runs when eval inputs changed (or on dispatch).
    needs: [changes]
    if: needs.changes.outputs.run_eval_gate == 'true'
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

      - name: Set up Python
        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          python-version: "3.12"

      - name: Install uv
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7.3.1

      - name: Sync dependencies
        run: uv sync --extra dev

      - name: Run completion quality gate
        run: |
          mkdir -p .artifacts/quality
          make eval-dataset
          make runtime-outcome-gate

      - name: Run routing quality gate
        run: |
          mkdir -p .artifacts/quality
          make router-eval

      - name: Upload eval artifacts
        # Upload whatever was produced even when a gate failed.
        if: always()
        uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
        with:
          name: eval-gate-${{ github.run_id }}
          path: .artifacts/quality
          if-no-files-found: warn

  faults:
    # Fault-injection subset lane; gated on fault-relevant path changes.
    needs: [changes]
    if: needs.changes.outputs.run_faults == 'true'
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2

      - name: Set up Python
        uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
        with:
          python-version: "3.12"

      - name: Install uv
        uses: astral-sh/setup-uv@5a095e7a2014a4212f075830d4f7277575a9d098 # v7.3.1

      - name: Sync dependencies
        run: uv sync --extra dev

      - name: Fault subset
        run: make test-faults
