Skip to content

learning

learning

Learning and pattern management commands for Marianne CLI.

This package implements commands for monitoring and managing the learning system. Originally a single 1673-line file, now split into focused modules:

  • _patterns: Pattern listing and WHY analysis (patterns-list, patterns-why)
  • _stats: Learning statistics, insights, activity (learning-stats, learning-insights, learning-activity)
  • _drift: Drift detection (learning-drift, learning-epistemic-drift)
  • _entropy: Entropy monitoring (patterns-entropy, entropy-status)
  • _budget: Exploration budget (patterns-budget)

All commands are re-exported from this `__init__.py` for backward compatibility. The import path `from .commands.learning import ...` continues to work unchanged.

Functions

patterns_budget

patterns_budget(job=Option(None, '--job', '-j', help='Filter by specific job hash'), history=Option(False, '--history', '-H', help='Show budget adjustment history'), limit=Option(20, '--limit', '-n', help='Number of history records to show'), json_output=Option(False, '--json', help='Output as JSON for machine parsing'))

Display exploration budget status and history.

The budget adjusts based on pattern entropy:

  • Low entropy -> budget increases (boost) to inject diversity
  • Healthy entropy -> budget decays toward floor
  • Budget never drops below floor (default 5%)

Examples:

    mzt patterns-budget               # Show current budget status
    mzt patterns-budget --history     # View budget adjustment history
    mzt patterns-budget --job abc123  # Filter by specific job
    mzt patterns-budget --json        # JSON output for scripting

Source code in src/marianne/cli/commands/learning/_budget.py
def patterns_budget(
    job: str = typer.Option(
        None,
        "--job",
        "-j",
        help="Filter by specific job hash",
    ),
    history: bool = typer.Option(
        False,
        "--history",
        "-H",
        help="Show budget adjustment history",
    ),
    limit: int = typer.Option(
        20,
        "--limit",
        "-n",
        help="Number of history records to show",
    ),
    json_output: bool = typer.Option(
        False,
        "--json",
        help="Output as JSON for machine parsing",
    ),
) -> None:
    """Display exploration budget status and history.

    The budget adjusts based on pattern entropy:
    - Low entropy -> budget increases (boost) to inject diversity
    - Healthy entropy -> budget decays toward floor
    - Budget never drops below floor (default 5%)

    Examples:
        mzt patterns-budget               # Show current budget status
        mzt patterns-budget --history     # View budget adjustment history
        mzt patterns-budget --job abc123  # Filter by specific job
        mzt patterns-budget --json        # JSON output for scripting
    """
    # Imported lazily so unrelated CLI commands don't pay the import cost.
    from marianne.learning.global_store import get_global_store

    store = get_global_store()

    if history:
        history_records = store.get_exploration_budget_history(
            job_hash=job, limit=limit
        )

        if json_output:
            budget_hist_output: list[dict[str, object]] = []
            for record in history_records:
                budget_hist_output.append({
                    "id": record.id,
                    "job_hash": record.job_hash,
                    "recorded_at": record.recorded_at.isoformat(),
                    "budget_value": round(record.budget_value, 4),
                    # Use an explicit None check: an entropy of exactly 0.0 is
                    # a valid (fully collapsed) reading, not a missing value.
                    "entropy_at_time": (
                        round(record.entropy_at_time, 4)
                        if record.entropy_at_time is not None
                        else None
                    ),
                    "adjustment_type": record.adjustment_type,
                    "adjustment_reason": record.adjustment_reason,
                })
            console.print(json_lib.dumps(budget_hist_output, indent=2))
            return

        if not history_records:
            console.print("[dim]No budget history found.[/dim]")
            console.print(
                "\n[dim]Hint: Enable exploration_budget in learning config "
                "to start tracking.[/dim]"
            )
            return

        table = Table(title="Exploration Budget History")
        table.add_column("Time", style="dim", width=16)
        table.add_column("Budget", justify="right", width=8)
        table.add_column("Entropy", justify="right", width=8)
        table.add_column("Type", width=12)
        table.add_column("Reason", width=35)

        for record in history_records:
            # Display thresholds: <=10% is near the floor, >=30% is actively
            # exploring; anything in between is considered normal.
            if record.budget_value <= 0.10:
                budget_color = "yellow"
            elif record.budget_value >= 0.30:
                budget_color = "cyan"
            else:
                budget_color = "green"
            budget_str = f"[{budget_color}]{record.budget_value:.1%}[/{budget_color}]"

            if record.entropy_at_time is not None:
                # Below 0.3 entropy is flagged red (diversity collapse risk).
                ent_color = "red" if record.entropy_at_time < 0.3 else "green"
                ent_str = f"[{ent_color}]{record.entropy_at_time:.3f}[/{ent_color}]"
            else:
                ent_str = "[dim]—[/dim]"

            type_colors = {
                "initial": "blue",
                "boost": "green",
                "decay": "dim",
                "floor_enforced": "yellow",
                "ceiling_enforced": "yellow",
            }
            type_color = type_colors.get(record.adjustment_type, "white")
            type_str = f"[{type_color}]{record.adjustment_type}[/{type_color}]"

            table.add_row(
                record.recorded_at.strftime("%m-%d %H:%M:%S"),
                budget_str,
                ent_str,
                type_str,
                record.adjustment_reason or "",
            )

        console.print(table)
        console.print(f"\n[dim]Showing {len(history_records)} record(s)[/dim]")
        return

    current = store.get_exploration_budget(job_hash=job)
    stats = store.get_exploration_budget_statistics(job_hash=job)

    if json_output:
        # Explicit None check so a valid 0.0 entropy survives into the JSON.
        entropy_val = None
        if current and current.entropy_at_time is not None:
            entropy_val = round(current.entropy_at_time, 4)
        budget_output: dict[str, dict[str, Any]] = {
            "current": {
                "budget_value": round(current.budget_value, 4) if current else None,
                "entropy_at_time": entropy_val,
                "adjustment_type": current.adjustment_type if current else None,
                "recorded_at": current.recorded_at.isoformat() if current else None,
            },
            "statistics": {
                "avg_budget": round(stats["avg_budget"], 4),
                "min_budget": round(stats["min_budget"], 4),
                "max_budget": round(stats["max_budget"], 4),
                "total_adjustments": stats["total_adjustments"],
                "floor_enforcements": stats["floor_enforcements"],
                "boost_count": stats["boost_count"],
                "decay_count": stats["decay_count"],
            },
        }
        console.print(json_lib.dumps(budget_output, indent=2))
        return

    console.print("[bold]Exploration Budget Status[/bold]\n")

    if current is None:
        console.print("[dim]No budget records found.[/dim]")
        console.print("\n[dim]Hint: Enable exploration_budget in learning config:[/dim]")
        console.print("[dim]  learning:[/dim]")
        console.print("[dim]    exploration_budget:[/dim]")
        console.print("[dim]      enabled: true[/dim]")
        return

    # Same display thresholds as the history table above.
    if current.budget_value <= 0.10:
        budget_color = "yellow"
        budget_status = "Low (near floor)"
    elif current.budget_value >= 0.30:
        budget_color = "cyan"
        budget_status = "High (exploring)"
    else:
        budget_color = "green"
        budget_status = "Normal"

    console.print(
        f"  Current Budget: [{budget_color}]{current.budget_value:.1%}"
        f"[/{budget_color}] ({budget_status})"
    )

    if current.entropy_at_time is not None:
        ent_color = "red" if current.entropy_at_time < 0.3 else "green"
        console.print(
            f"  Entropy at Last Check: [{ent_color}]"
            f"{current.entropy_at_time:.4f}[/{ent_color}]"
        )

    console.print(f"  Last Adjustment: [dim]{current.adjustment_type}[/dim]")
    console.print(f"  Last Updated: [dim]{current.recorded_at.strftime('%Y-%m-%d %H:%M:%S')}[/dim]")

    console.print("")

    if stats["total_adjustments"] > 0:
        console.print("[bold]Statistics[/bold]")
        console.print(f"  Average Budget: [cyan]{stats['avg_budget']:.1%}[/cyan]")
        console.print(f"  Range: [dim]{stats['min_budget']:.1%} - {stats['max_budget']:.1%}[/dim]")
        console.print(f"  Total Adjustments: [yellow]{stats['total_adjustments']}[/yellow]")
        console.print(
            f"    Boosts: [green]{stats['boost_count']}[/green] | "
            f"Decays: [dim]{stats['decay_count']}[/dim] | "
            f"Floor Enforced: [yellow]{stats['floor_enforcements']}[/yellow]"
        )

    console.print("")
    # Heuristic health hints: frequent floor hits (>30% of adjustments) or a
    # boost/decay imbalance suggest the entropy configuration needs tuning.
    if stats["floor_enforcements"] > stats["total_adjustments"] * 0.3:
        console.print("[yellow]⚠ Budget frequently hitting floor[/yellow]")
        console.print("[dim]Consider lowering entropy threshold or increasing boost amount.[/dim]")
    elif stats["boost_count"] > stats["decay_count"] * 2:
        console.print("[cyan]ℹ Frequent boosts - entropy may be consistently low[/cyan]")
        console.print("[dim]This may indicate pattern concentration issues.[/dim]")
    elif stats["total_adjustments"] > 0:
        console.print("[green]✓ Budget adjusting normally[/green]")

learning_drift

learning_drift(threshold=Option(0.2, '--threshold', '-t', help='Drift threshold (0.0-1.0) to flag patterns'), window=Option(5, '--window', '-w', help='Window size for drift comparison'), limit=Option(10, '--limit', '-l', help='Maximum number of patterns to show'), json_output=Option(False, '--json', '-j', help='Output as JSON for machine parsing'), summary=Option(False, '--summary', '-s', help='Show only summary statistics'))

Detect patterns with effectiveness drift.

Drift is calculated by comparing the pattern's effectiveness in its last N applications vs the previous N applications.

Examples:

    mzt learning-drift                # Show drifting patterns
    mzt learning-drift -t 0.15        # Lower threshold (more sensitive)
    mzt learning-drift -w 10          # Larger comparison window
    mzt learning-drift --summary      # Just show summary stats
    mzt learning-drift --json         # JSON output for scripting

Source code in src/marianne/cli/commands/learning/_drift.py
def learning_drift(
    threshold: float = typer.Option(
        0.2,
        "--threshold",
        "-t",
        help="Drift threshold (0.0-1.0) to flag patterns",
    ),
    window: int = typer.Option(
        5,
        "--window",
        "-w",
        help="Window size for drift comparison",
    ),
    limit: int = typer.Option(
        10,
        "--limit",
        "-l",
        help="Maximum number of patterns to show",
    ),
    json_output: bool = typer.Option(
        False,
        "--json",
        "-j",
        help="Output as JSON for machine parsing",
    ),
    summary: bool = typer.Option(
        False,
        "--summary",
        "-s",
        help="Show only summary statistics",
    ),
) -> None:
    """Detect patterns with effectiveness drift.

    Drift compares a pattern's effectiveness over its most recent N
    applications against the N applications immediately before those.

    Examples:
        mzt learning-drift                # Show drifting patterns
        mzt learning-drift -t 0.15        # Lower threshold (more sensitive)
        mzt learning-drift -w 10          # Larger comparison window
        mzt learning-drift --summary      # Just show summary stats
        mzt learning-drift --json         # JSON output for scripting
    """
    # Lazy import keeps CLI startup fast for unrelated commands.
    from marianne.learning.global_store import get_global_store

    store = get_global_store()

    # Summary mode: aggregate statistics only, no per-pattern table.
    if summary:
        stats = store.get_pattern_drift_summary()

        if json_output:
            console.print(json_lib.dumps(stats, indent=2))
            return

        drifting_count = stats["patterns_drifting"]
        count_color = "red" if drifting_count > 0 else "green"

        console.print("[bold]Pattern Drift Summary[/bold]\n")
        console.print(f"  Total patterns: {stats['total_patterns']}")
        console.print(f"  Patterns analyzed: {stats['patterns_analyzed']}")
        console.print(
            f"  Patterns drifting: [{count_color}]{drifting_count}[/{count_color}]"
        )
        console.print(
            f"  Avg drift magnitude: {stats['avg_drift_magnitude']:.3f}"
        )
        if stats["most_drifted"]:
            console.print(
                f"  Most drifted pattern: {stats['most_drifted']}"
            )
        return

    flagged = store.get_drifting_patterns(
        drift_threshold=threshold,
        window_size=window,
        limit=limit,
    )

    if json_output:
        serialized = []
        for metric in flagged:
            serialized.append({
                "pattern_id": metric.pattern_id,
                "pattern_name": metric.pattern_name,
                "effectiveness_before": round(metric.effectiveness_before, 3),
                "effectiveness_after": round(metric.effectiveness_after, 3),
                "drift_magnitude": round(metric.drift_magnitude, 3),
                "drift_direction": metric.drift_direction,
                "grounding_confidence_avg": round(metric.grounding_confidence_avg, 3),
                "applications_analyzed": metric.applications_analyzed,
            })
        payload = {
            "threshold": threshold,
            "window_size": window,
            "patterns": serialized,
        }
        console.print(json_lib.dumps(payload, indent=2))
        return

    console.print(f"[bold]Patterns with Drift > {threshold:.0%}[/bold]")
    console.print(f"[dim]Window size: {window} applications per period[/dim]\n")

    if not flagged:
        console.print("[green]✓ No patterns exceeding drift threshold[/green]")
        console.print("[dim]All patterns are stable or improving[/dim]")
        return

    table = Table(show_header=True, header_style="bold")
    table.add_column("Pattern", style="cyan")
    for heading in ("Before", "After", "Drift"):
        table.add_column(heading, justify="right")
    table.add_column("Direction", justify="center")
    table.add_column("Grounding", justify="right")

    # Markup for each drift direction; anything unrecognized renders as stable.
    direction_labels = {
        "negative": "[red]↓ declining[/red]",
        "positive": "[green]↑ improving[/green]",
    }

    for metric in flagged:
        arrow = direction_labels.get(metric.drift_direction, "[dim]→ stable[/dim]")
        magnitude_color = "red" if metric.drift_magnitude > 0.3 else "yellow"

        table.add_row(
            metric.pattern_name[:30],
            f"{metric.effectiveness_before:.1%}",
            f"{metric.effectiveness_after:.1%}",
            f"[{magnitude_color}]{metric.drift_magnitude:.1%}[/{magnitude_color}]",
            arrow,
            f"{metric.grounding_confidence_avg:.1%}",
        )

    console.print(table)

    falling = [m for m in flagged if m.drift_direction == "negative"]
    if falling:
        console.print(
            f"\n[yellow]⚠ {len(falling)} pattern(s) showing declining effectiveness[/yellow]"
        )
        console.print("[dim]Consider reviewing these patterns for deprecation[/dim]")

learning_epistemic_drift

learning_epistemic_drift(threshold=Option(0.15, '--threshold', '-t', help='Epistemic drift threshold (0.0-1.0) to flag patterns'), window=Option(5, '--window', '-w', help='Window size for drift comparison'), limit=Option(10, '--limit', '-l', help='Maximum number of patterns to show'), json_output=Option(False, '--json', '-j', help='Output as JSON for machine parsing'), summary=Option(False, '--summary', '-s', help='Show only summary statistics'))

Detect patterns with epistemic drift (belief/confidence changes).

Epistemic drift tracks confidence changes over time, complementing effectiveness drift as a leading indicator of pattern health.

Examples:

    mzt learning-epistemic-drift            # Show patterns with belief drift
    mzt learning-epistemic-drift -t 0.1     # Lower threshold (more sensitive)
    mzt learning-epistemic-drift --summary  # Just show summary stats
    mzt learning-epistemic-drift --json     # JSON output for scripting

Source code in src/marianne/cli/commands/learning/_drift.py
def learning_epistemic_drift(
    threshold: float = typer.Option(
        0.15,
        "--threshold",
        "-t",
        help="Epistemic drift threshold (0.0-1.0) to flag patterns",
    ),
    window: int = typer.Option(
        5,
        "--window",
        "-w",
        help="Window size for drift comparison",
    ),
    limit: int = typer.Option(
        10,
        "--limit",
        "-l",
        help="Maximum number of patterns to show",
    ),
    json_output: bool = typer.Option(
        False,
        "--json",
        "-j",
        help="Output as JSON for machine parsing",
    ),
    summary: bool = typer.Option(
        False,
        "--summary",
        "-s",
        help="Show only summary statistics",
    ),
) -> None:
    """Detect patterns with epistemic drift (belief/confidence changes).

    Epistemic drift tracks how pattern confidence shifts over time. It
    complements effectiveness drift and can act as a leading indicator
    of pattern health.

    Examples:
        mzt learning-epistemic-drift            # Show patterns with belief drift
        mzt learning-epistemic-drift -t 0.1     # Lower threshold (more sensitive)
        mzt learning-epistemic-drift --summary  # Just show summary stats
        mzt learning-epistemic-drift --json     # JSON output for scripting
    """
    # Lazy import keeps CLI startup fast for unrelated commands.
    from marianne.learning.global_store import get_global_store

    store = get_global_store()

    # Summary mode: aggregate statistics only, no per-pattern table.
    if summary:
        stats = store.get_epistemic_drift_summary()

        if json_output:
            console.print(json_lib.dumps(stats, indent=2))
            return

        drifting_count = stats["patterns_with_epistemic_drift"]
        count_color = "red" if drifting_count > 0 else "green"

        console.print("[bold]Epistemic Drift Summary[/bold]\n")
        console.print(f"  Total patterns: {stats['total_patterns']}")
        console.print(f"  Patterns analyzed: {stats['patterns_analyzed']}")
        console.print(f"  Patterns with drift: [{count_color}]{drifting_count}[/{count_color}]")
        console.print(
            f"  Avg belief change: {stats['avg_belief_change']:.3f}"
        )
        console.print(
            f"  Avg belief entropy: {stats['avg_belief_entropy']:.3f}"
        )
        if stats["most_unstable"]:
            console.print(
                f"  Most unstable pattern: {stats['most_unstable']}"
            )
        return

    flagged = store.get_epistemic_drifting_patterns(
        drift_threshold=threshold,
        window_size=window,
        limit=limit,
    )

    if json_output:
        serialized = []
        for metric in flagged:
            serialized.append({
                "pattern_id": metric.pattern_id,
                "pattern_name": metric.pattern_name,
                "confidence_before": round(metric.confidence_before, 3),
                "confidence_after": round(metric.confidence_after, 3),
                "belief_change": round(metric.belief_change, 3),
                "belief_entropy": round(metric.belief_entropy, 3),
                "drift_direction": metric.drift_direction,
                "applications_analyzed": metric.applications_analyzed,
            })
        payload = {
            "threshold": threshold,
            "window_size": window,
            "patterns": serialized,
        }
        console.print(json_lib.dumps(payload, indent=2))
        return

    console.print(f"[bold]Patterns with Epistemic Drift > {threshold:.0%}[/bold]")
    console.print(f"[dim]Window size: {window} applications per period[/dim]\n")

    if not flagged:
        console.print("[green]✓ No patterns exceeding epistemic drift threshold[/green]")
        console.print("[dim]All pattern beliefs are stable[/dim]")
        return

    table = Table(show_header=True, header_style="bold")
    table.add_column("Pattern", style="cyan")
    for heading in ("Conf Before", "Conf After", "Change"):
        table.add_column(heading, justify="right")
    table.add_column("Direction", justify="center")
    table.add_column("Entropy", justify="right")

    # Markup for each drift direction; anything unrecognized renders as stable.
    direction_labels = {
        "weakening": "[red]↓ weakening[/red]",
        "strengthening": "[green]↑ strengthening[/green]",
    }

    for metric in flagged:
        arrow = direction_labels.get(metric.drift_direction, "[dim]→ stable[/dim]")
        shift_color = "red" if abs(metric.belief_change) > 0.2 else "yellow"
        unstable_color = "red" if metric.belief_entropy > 0.3 else "dim"

        table.add_row(
            metric.pattern_name[:30],
            f"{metric.confidence_before:.1%}",
            f"{metric.confidence_after:.1%}",
            f"[{shift_color}]{metric.belief_change:+.1%}[/{shift_color}]",
            arrow,
            f"[{unstable_color}]{metric.belief_entropy:.2f}[/{unstable_color}]",
        )

    console.print(table)

    fading = [m for m in flagged if m.drift_direction == "weakening"]
    if fading:
        console.print(
            f"\n[yellow]⚠ {len(fading)} pattern(s) showing weakening confidence[/yellow]"
        )
        console.print(
            "[dim]These patterns may need investigation "
            "before effectiveness declines[/dim]"
        )

    erratic = [m for m in flagged if m.belief_entropy > 0.3]
    if erratic:
        console.print(
            f"\n[yellow]⚠ {len(erratic)} pattern(s) with high belief entropy[/yellow]"
        )
        console.print("[dim]Inconsistent confidence suggests unstable pattern application[/dim]")

entropy_status

entropy_status(job=Option(None, '--job', '-j', help='Filter by specific job hash'), history=Option(False, '--history', '-H', help='Show entropy response history'), limit=Option(20, '--limit', '-n', help='Number of history records to show'), json_output=Option(False, '--json', help='Output as JSON for machine parsing'), check=Option(False, '--check', '-c', help='Check if entropy response is needed (dry-run)'))

Display entropy response status and history.

When pattern entropy drops below threshold, the system automatically: - Boosts the exploration budget to encourage diversity - Revisits quarantined patterns for potential revalidation

Examples:

    marianne entropy-status               # Show current entropy response status
    marianne entropy-status --history     # View response history
    marianne entropy-status --check       # Check if response is needed now
    marianne entropy-status --json        # JSON output for scripting

Source code in src/marianne/cli/commands/learning/_entropy.py
def entropy_status(
    job: str = typer.Option(
        None,
        "--job",
        "-j",
        help="Filter by specific job hash",
    ),
    history: bool = typer.Option(
        False,
        "--history",
        "-H",
        help="Show entropy response history",
    ),
    limit: int = typer.Option(
        20,
        "--limit",
        "-n",
        help="Number of history records to show",
    ),
    json_output: bool = typer.Option(
        False,
        "--json",
        help="Output as JSON for machine parsing",
    ),
    check: bool = typer.Option(
        False,
        "--check",
        "-c",
        help="Check if entropy response is needed (dry-run)",
    ),
) -> None:
    """Display entropy response status and history.

    When pattern entropy drops below threshold, the system automatically:
    - Boosts the exploration budget to encourage diversity
    - Revisits quarantined patterns for potential revalidation

    Examples:
        marianne entropy-status               # Show current entropy response status
        marianne entropy-status --history     # View response history
        marianne entropy-status --check       # Check if response is needed now
        marianne entropy-status --json        # JSON output for scripting
    """
    # Imported lazily so the CLI starts fast when this command is unused.
    from marianne.learning.global_store import get_global_store

    store = get_global_store()

    if check:
        needs_response, entropy, reason = store.check_entropy_response_needed(
            job_hash=job or "default"
        )

        if json_output:
            output = {
                "needs_response": needs_response,
                # Explicit None check: entropy 0.0 (full collapse) is a valid
                # reading and must not be reported as null.
                "current_entropy": round(entropy, 4) if entropy is not None else None,
                "reason": reason,
            }
            console.print(json_lib.dumps(output, indent=2))
            return

        console.print("[bold]Entropy Response Check[/bold]\n")
        if needs_response:
            console.print("[red bold]✓ Response NEEDED[/red bold]")
            # NOTE(review): assumes entropy is always set when needs_response
            # is True — verify against check_entropy_response_needed.
            console.print(f"  Current Entropy: [red]{entropy:.4f}[/red]")
            console.print(f"  Reason: {reason}")
            console.print(
                "\n[dim]Enable entropy_response in learning config "
                "to trigger automatically.[/dim]"
            )
        else:
            console.print("[green]✗ No response needed[/green]")
            if entropy is not None:
                console.print(f"  Current Entropy: [green]{entropy:.4f}[/green]")
            console.print(f"  Reason: {reason}")
        return

    if history:
        history_records = store.get_entropy_response_history(
            job_hash=job, limit=limit
        )

        if json_output:
            resp_hist_output: list[dict[str, Any]] = []
            for record in history_records:
                resp_hist_output.append({
                    "id": record.id,
                    "job_hash": record.job_hash,
                    "recorded_at": record.recorded_at.isoformat(),
                    "entropy_at_trigger": round(record.entropy_at_trigger, 4),
                    "threshold_used": round(record.threshold_used, 4),
                    "actions_taken": record.actions_taken,
                    "budget_boosted": record.budget_boosted,
                    "quarantine_revisits": record.quarantine_revisits,
                })
            console.print(json_lib.dumps(resp_hist_output, indent=2))
            return

        if not history_records:
            console.print("[dim]No entropy response history found.[/dim]")
            console.print(
                "\n[dim]Hint: Enable entropy_response in learning config "
                "to start tracking.[/dim]"
            )
            return

        table = Table(title="Entropy Response History")
        table.add_column("Time", style="dim", width=16)
        table.add_column("Entropy", justify="right", width=8)
        table.add_column("Threshold", justify="right", width=9)
        table.add_column("Budget+", justify="center", width=8)
        table.add_column("Revisits", justify="right", width=8)
        table.add_column("Actions", width=25)

        for record in history_records:
            budget_str = "[green]Yes[/green]" if record.budget_boosted else "[dim]No[/dim]"
            if record.quarantine_revisits > 0:
                revisit_str = f"[cyan]{record.quarantine_revisits}[/cyan]"
            else:
                revisit_str = "[dim]0[/dim]"
            actions_str = (
                ", ".join(record.actions_taken) if record.actions_taken else "[dim]none[/dim]"
            )

            table.add_row(
                record.recorded_at.strftime("%m-%d %H:%M:%S"),
                f"[red]{record.entropy_at_trigger:.3f}[/red]",
                f"{record.threshold_used:.3f}",
                budget_str,
                revisit_str,
                actions_str,
            )

        console.print(table)
        console.print(f"\n[dim]Showing {len(history_records)} record(s)[/dim]")
        return

    # Default mode: aggregate statistics plus the most recent response.
    stats = store.get_entropy_response_statistics(job_hash=job)
    last = store.get_last_entropy_response(job_hash=job)

    if json_output:
        last_resp: dict[str, Any] | None = None
        if last:
            last_resp = {
                "entropy_at_trigger": round(last.entropy_at_trigger, 4),
                "threshold_used": round(last.threshold_used, 4),
                "actions_taken": last.actions_taken,
                "recorded_at": last.recorded_at.isoformat(),
            }
        status_output: dict[str, Any] = {
            "statistics": {
                "total_responses": stats["total_responses"],
                "avg_entropy_at_trigger": round(stats["avg_entropy_at_trigger"], 4),
                "budget_boosts": stats["budget_boosts"],
                "quarantine_revisits": stats["quarantine_revisits"],
                "last_response": stats["last_response"],
            },
            "last_response": last_resp,
        }
        console.print(json_lib.dumps(status_output, indent=2))
        return

    console.print("[bold]Entropy Response Status[/bold]\n")

    if stats["total_responses"] == 0:
        console.print("[dim]No entropy responses recorded yet.[/dim]")
        console.print("\n[dim]Hint: Enable entropy_response in learning config:[/dim]")
        console.print("[dim]  learning:[/dim]")
        console.print("[dim]    entropy_response:[/dim]")
        console.print("[dim]      enabled: true[/dim]")
        return

    console.print(f"  Total Responses: [yellow]{stats['total_responses']}[/yellow]")
    console.print(f"  Avg Trigger Entropy: [red]{stats['avg_entropy_at_trigger']:.4f}[/red]")
    console.print(f"  Budget Boosts: [green]{stats['budget_boosts']}[/green]")
    console.print(f"  Quarantine Revisits: [cyan]{stats['quarantine_revisits']}[/cyan]")

    console.print("")

    if last:
        console.print("[bold]Last Response[/bold]")
        console.print(f"  Time: [dim]{last.recorded_at.strftime('%Y-%m-%d %H:%M:%S')}[/dim]")
        console.print(f"  Entropy at Trigger: [red]{last.entropy_at_trigger:.4f}[/red]")
        console.print(f"  Threshold: [dim]{last.threshold_used:.4f}[/dim]")
        actions = ', '.join(last.actions_taken) if last.actions_taken else '[dim]none[/dim]'
        console.print(f"  Actions: {actions}")

    console.print("")
    # Heuristic health hints: >10 responses suggests chronically low
    # diversity; more revisits than responses means quarantine is active.
    if stats["total_responses"] > 10:
        console.print("[yellow]⚠ Many responses triggered[/yellow]")
        console.print("[dim]Pattern diversity may be consistently low. Review patterns.[/dim]")
    elif stats["quarantine_revisits"] > stats["total_responses"]:
        console.print("[cyan]ℹ Active quarantine revisiting[/cyan]")
        console.print("[dim]Previously problematic patterns are being reconsidered.[/dim]")
    else:
        console.print("[green]✓ Entropy response system active[/green]")

patterns_entropy

patterns_entropy(alert_threshold=Option(0.5, '--threshold', '-t', help='Diversity index below this triggers alert (0.0-1.0)'), history=Option(False, '--history', '-H', help='Show entropy history over time'), limit=Option(20, '--limit', '-n', help='Number of history records to show'), json_output=Option(False, '--json', '-j', help='Output as JSON for machine parsing'), record=Option(False, '--record', '-r', help='Record current entropy to history'))

Monitor pattern population diversity using Shannon entropy.

Shannon entropy measures how evenly patterns are used: - High entropy (H -> max): Healthy diversity, many patterns contribute - Low entropy (H -> 0): Single pattern dominates (collapse risk)

Examples:

    mzt patterns-entropy                  # Show current entropy metrics
    mzt patterns-entropy --threshold 0.3  # Alert on low diversity
    mzt patterns-entropy --history        # View entropy trend over time
    mzt patterns-entropy --record         # Record snapshot for trend analysis
    mzt patterns-entropy --json           # JSON output for scripting

Source code in src/marianne/cli/commands/learning/_entropy.py
def patterns_entropy(
    alert_threshold: float = typer.Option(
        0.5,
        "--threshold",
        "-t",
        help="Diversity index below this triggers alert (0.0-1.0)",
    ),
    history: bool = typer.Option(
        False,
        "--history",
        "-H",
        help="Show entropy history over time",
    ),
    limit: int = typer.Option(
        20,
        "--limit",
        "-n",
        help="Number of history records to show",
    ),
    json_output: bool = typer.Option(
        False,
        "--json",
        "-j",
        help="Output as JSON for machine parsing",
    ),
    record: bool = typer.Option(
        False,
        "--record",
        "-r",
        help="Record current entropy to history",
    ),
) -> None:
    """Monitor pattern population diversity using Shannon entropy.

    Shannon entropy measures how evenly patterns are used:
    - High entropy (H -> max): Healthy diversity, many patterns contribute
    - Low entropy (H -> 0): Single pattern dominates (collapse risk)

    Examples:
        mzt patterns-entropy               # Show current entropy metrics
        mzt patterns-entropy --threshold 0.3  # Alert on low diversity
        mzt patterns-entropy --history     # View entropy trend over time
        mzt patterns-entropy --record      # Record snapshot for trend analysis
        mzt patterns-entropy --json        # JSON output for scripting
    """
    # Imported lazily so CLI startup does not load the learning stack.
    from marianne.learning.global_store import get_global_store

    store = get_global_store()

    # --history mode renders previously recorded snapshots and returns;
    # the non-history path below computes fresh metrics instead.
    if history:
        history_records = store.get_pattern_entropy_history(limit=limit)

        if json_output:
            # Machine-readable dump; floats rounded for stable output.
            hist_output: list[dict[str, Any]] = []
            for entry in history_records:
                hist_output.append({
                    "calculated_at": entry.calculated_at.isoformat(),
                    "shannon_entropy": round(entry.shannon_entropy, 4),
                    "max_possible_entropy": round(entry.max_possible_entropy, 4),
                    "diversity_index": round(entry.diversity_index, 4),
                    "unique_pattern_count": entry.unique_pattern_count,
                    "effective_pattern_count": entry.effective_pattern_count,
                    "total_applications": entry.total_applications,
                    "dominant_pattern_share": round(entry.dominant_pattern_share, 4),
                    "threshold_exceeded": entry.threshold_exceeded,
                })
            console.print(json_lib.dumps(hist_output, indent=2))
            return

        if not history_records:
            console.print("[dim]No entropy history found.[/dim]")
            console.print("\n[dim]Hint: Use --record to start tracking entropy over time.[/dim]")
            return

        table = Table(title="Pattern Entropy History")
        table.add_column("Time", style="dim", width=20)
        table.add_column("Shannon H", justify="right", width=10)
        table.add_column("Diversity", justify="right", width=10)
        table.add_column("Unique", justify="right", width=8)
        table.add_column("Effective", justify="right", width=10)
        table.add_column("Applications", justify="right", width=12)
        table.add_column("Dominant %", justify="right", width=10)

        for entry in history_records:
            # Coloring uses the CLI-supplied threshold, not the threshold
            # that was stored alongside each snapshot.
            div_color = "green" if entry.diversity_index >= alert_threshold else "red"
            div_str = f"[{div_color}]{entry.diversity_index:.3f}[/{div_color}]"

            # Dominant-share traffic light: >50% red, >30% yellow, else green.
            if entry.dominant_pattern_share > 0.5:
                dom_color = "red"
            elif entry.dominant_pattern_share > 0.3:
                dom_color = "yellow"
            else:
                dom_color = "green"
            dom_str = f"[{dom_color}]{entry.dominant_pattern_share:.1%}[/{dom_color}]"

            table.add_row(
                entry.calculated_at.strftime("%Y-%m-%d %H:%M"),
                f"{entry.shannon_entropy:.3f}",
                div_str,
                str(entry.unique_pattern_count),
                str(entry.effective_pattern_count),
                str(entry.total_applications),
                dom_str,
            )

        console.print(table)
        console.print(f"\n[dim]Showing {len(history_records)} record(s)[/dim]")
        return

    # Fresh metrics: recompute the alert flag against the CLI threshold
    # rather than whatever default the store used.
    metrics = store.calculate_pattern_entropy()
    metrics.threshold_exceeded = metrics.diversity_index < alert_threshold

    if record:
        # NOTE(review): with --record --json this confirmation line precedes
        # the JSON payload, which may break strict JSON consumers — confirm
        # this is intended.
        record_id = store.record_pattern_entropy(metrics)
        console.print(f"[green]Recorded entropy snapshot: {record_id[:10]}[/green]\n")

    if json_output:
        output = {
            "calculated_at": metrics.calculated_at.isoformat(),
            "shannon_entropy": round(metrics.shannon_entropy, 4),
            "max_possible_entropy": round(metrics.max_possible_entropy, 4),
            "diversity_index": round(metrics.diversity_index, 4),
            "unique_pattern_count": metrics.unique_pattern_count,
            "effective_pattern_count": metrics.effective_pattern_count,
            "total_applications": metrics.total_applications,
            "dominant_pattern_share": round(metrics.dominant_pattern_share, 4),
            "threshold_exceeded": metrics.threshold_exceeded,
            "alert_threshold": alert_threshold,
        }
        console.print(json_lib.dumps(output, indent=2))
        return

    console.print("[bold]Pattern Population Entropy[/bold]\n")

    # No applications means entropy is undefined in practice; bail with a hint.
    if metrics.total_applications == 0:
        console.print("[dim]No pattern applications yet.[/dim]")
        console.print("\n[dim]Hint: Run jobs with learning enabled to build patterns.[/dim]")
        return

    console.print(f"  Shannon Entropy (H): [cyan]{metrics.shannon_entropy:.4f}[/cyan] bits")
    console.print(f"  Max Possible (H_max): [dim]{metrics.max_possible_entropy:.4f}[/dim] bits")

    div_color = "green" if metrics.diversity_index >= alert_threshold else "red"
    console.print(
        f"  Diversity Index: [{div_color}]{metrics.diversity_index:.4f}"
        f"[/{div_color}] (threshold: {alert_threshold})"
    )

    console.print("")

    console.print(f"  Unique Patterns: [yellow]{metrics.unique_pattern_count}[/yellow]")
    console.print(
        f"  Effective Patterns: [yellow]{metrics.effective_pattern_count}"
        "[/yellow] (with ≥1 application)"
    )
    console.print(f"  Total Applications: [yellow]{metrics.total_applications}[/yellow]")

    # Same dominant-share traffic light as the history table above.
    if metrics.dominant_pattern_share > 0.5:
        dom_color = "red"
    elif metrics.dominant_pattern_share > 0.3:
        dom_color = "yellow"
    else:
        dom_color = "green"
    console.print(
        f"  Dominant Pattern Share: [{dom_color}]"
        f"{metrics.dominant_pattern_share:.1%}[/{dom_color}]"
    )

    # Verdict: low diversity takes precedence over concentration warnings.
    if metrics.threshold_exceeded:
        output_error(
            "Pattern population shows low diversity - model collapse risk!",
            severity="warning",
            hints=["Consider reviewing dominant patterns and encouraging exploration."],
        )
    elif metrics.dominant_pattern_share > 0.5:
        output_error(
            "Single pattern holds >50% of applications",
            severity="warning",
            hints=["Monitor for further concentration."],
        )
    else:
        console.print("\n[green]✓ Healthy pattern diversity[/green]")

learning_export

learning_export(output_dir='./learning-export', fmt='markdown', since=30, include_pending=True, min_effectiveness=0.0)

Export learning store data to workspace files.

Writes structured files for consumption by evolution scores: semantic-insights, drift-report, entropy-state, pattern-health, evolution-history, error-landscape.

Examples:

mzt learning-export --output-dir ./workspace/learning
mzt learning-export --format json --since 60
mzt learning-export --min-effectiveness 0.6 --no-include-pending

Source code in src/marianne/cli/commands/learning/_export.py
def learning_export(
    output_dir: Annotated[
        str,
        typer.Option("--output-dir", "-o", help="Directory to write export files"),
    ] = "./learning-export",
    fmt: Annotated[
        str,
        typer.Option("--format", "-f", help="Output format: markdown or json"),
    ] = "markdown",
    since: Annotated[
        int,
        typer.Option("--since", "-s", help="Export data from last N days"),
    ] = 30,
    include_pending: Annotated[
        bool,
        typer.Option(
            "--include-pending/--no-include-pending",
            help="Include PENDING quarantine patterns in export (default: True)",
        ),
    ] = True,
    min_effectiveness: Annotated[
        float,
        typer.Option(
            "--min-effectiveness",
            help="Minimum effectiveness score (0.0-1.0) for exported patterns",
        ),
    ] = 0.0,
) -> None:
    """Export learning store data to workspace files.

    Writes structured files for consumption by evolution scores:
    semantic-insights, drift-report, entropy-state, pattern-health,
    evolution-history, error-landscape.

    Examples:
        mzt learning-export --output-dir ./workspace/learning
        mzt learning-export --format json --since 60
        mzt learning-export --min-effectiveness 0.6 --no-include-pending
    """
    # Imported lazily so CLI startup does not load the learning stack.
    from marianne.learning.global_store import get_global_store
    from marianne.learning.patterns import PatternType
    from marianne.learning.store.models import QuarantineStatus

    # NOTE(review): --since is declared but not used anywhere in this body —
    # confirm whether date filtering is intended here.

    def _status_value(p: Any) -> Any:
        """Return a pattern's quarantine status as a JSON-safe scalar.

        The attribute may be an enum member (unwrap to .value), a plain
        string (returned as-is), or absent entirely (None).
        """
        status = getattr(p, "quarantine_status", None)
        return getattr(status, "value", status)

    store = get_global_store()
    out = Path(output_dir)
    out.mkdir(parents=True, exist_ok=True)

    # Any value other than "json" falls back to markdown output.
    ext = "json" if fmt == "json" else "md"

    # Build filter description for headers
    filters_desc = []
    if not include_pending:
        filters_desc.append("excluding PENDING patterns")
    if min_effectiveness > 0.0:
        filters_desc.append(f"min_effectiveness >= {min_effectiveness:.1%}")
    filter_note = (
        f"Filters applied: {', '.join(filters_desc)}"
        if filters_desc
        else "No filters applied (all patterns exported)"
    )

    # 1. Semantic insights
    # Fix: Use PatternType.SEMANTIC_INSIGHT.value (lowercase "semantic_insight")
    # Apply effectiveness filter by checking effectiveness_score after retrieval
    all_semantic = store.get_patterns(
        pattern_type=PatternType.SEMANTIC_INSIGHT.value,
        min_priority=0.0,
        limit=1000,
        exclude_quarantined=False,  # Handle PENDING separately
    )

    # Apply custom filters
    insights = []
    for p in all_semantic:
        # Filter by quarantine status
        q_status = getattr(p, "quarantine_status", None)
        if not include_pending and q_status == QuarantineStatus.PENDING:
            continue

        # Filter by effectiveness
        if (p.effectiveness_score or 0.0) < min_effectiveness:
            continue

        insights.append(p)
    if fmt == "json":
        insights_data: Any = [
            {
                "id": p.id,
                "name": p.pattern_name,
                "type": p.pattern_type,
                "description": p.description,
                "effectiveness": p.effectiveness_score,
                "trust": p.trust_score,
                "occurrences": p.occurrence_count,
                "tags": getattr(p, "context_tags", []),
                # Fix: unwrap the enum so json.dumps does not raise TypeError
                # on a non-serializable QuarantineStatus member.
                "quarantine_status": _status_value(p),
            }
            for p in insights
        ]
        _write_file(
            out / f"semantic-insights.{ext}",
            json_lib.dumps(insights_data, indent=2),
        )
    else:
        _write_file(
            out / f"semantic-insights.{ext}",
            _format_markdown_insights(insights, filter_note),
        )

    # 2. Drift report: effectiveness drift and epistemic (confidence) drift.
    eff_drift = store.get_drifting_patterns(drift_threshold=0.15, limit=30)
    epi_drift = store.get_epistemic_drifting_patterns(
        drift_threshold=0.1, limit=30
    )
    if fmt == "json":
        drift_data: Any = {
            "effectiveness_drift": [
                {
                    "pattern_id": m.pattern_id,
                    "name": m.pattern_name,
                    "before": m.effectiveness_before,
                    "after": m.effectiveness_after,
                    "magnitude": m.drift_magnitude,
                    "direction": m.drift_direction,
                }
                for m in eff_drift
            ],
            "epistemic_drift": [
                {
                    "pattern_id": m.pattern_id,
                    "name": m.pattern_name,
                    "confidence_before": m.confidence_before,
                    "confidence_after": m.confidence_after,
                    "change": m.belief_change,
                    "direction": m.drift_direction,
                }
                for m in epi_drift
            ],
        }
        _write_file(
            out / f"drift-report.{ext}", json_lib.dumps(drift_data, indent=2)
        )
    else:
        _write_file(
            out / f"drift-report.{ext}",
            _format_markdown_drift(eff_drift, epi_drift),
        )

    # 3. Entropy state: current population entropy plus recent alert responses.
    entropy = store.calculate_pattern_entropy()
    alerts = store.get_entropy_response_history(limit=10)
    if fmt == "json":
        entropy_data: Any = {
            "shannon_entropy": entropy.shannon_entropy,
            "diversity_index": entropy.diversity_index,
            "unique_patterns": entropy.unique_pattern_count,
            "effective_patterns": entropy.effective_pattern_count,
            "dominant_share": entropy.dominant_pattern_share,
            "recent_responses": [str(a) for a in alerts],
        }
        _write_file(
            out / f"entropy-state.{ext}",
            json_lib.dumps(entropy_data, indent=2),
        )
    else:
        _write_file(
            out / f"entropy-state.{ext}",
            _format_markdown_entropy(entropy, alerts),
        )

    # 4. Pattern health: quarantined and low-trust patterns.
    all_patterns = store.get_patterns(
        min_priority=0.0, limit=500, exclude_quarantined=False
    )
    if fmt == "json":
        health_data: Any = {
            "quarantined": [
                {"id": p.id, "name": p.pattern_name}
                for p in all_patterns
                # Fix: the original compared the enum against the literal
                # string "QUARANTINED", which never matched. Normalize the
                # status (enum .value or raw string) and compare
                # case-insensitively so both representations work.
                if str(_status_value(p) or "").lower() == "quarantined"
            ],
            "low_trust": [
                {"id": p.id, "name": p.pattern_name, "trust": p.trust_score}
                for p in all_patterns
                if (p.trust_score or 1.0) < 0.3
            ],
        }
        _write_file(
            out / f"pattern-health.{ext}",
            json_lib.dumps(health_data, indent=2),
        )
    else:
        _write_file(
            out / f"pattern-health.{ext}",
            _format_markdown_health(all_patterns, filter_note),
        )

    # 5. Evolution history: last few trajectory entries.
    trajectory = store.get_trajectory(limit=5)
    if fmt == "json":
        traj_data: Any = [
            {
                "cycle": e.cycle,
                "date": str(e.recorded_at),
                "completed": e.evolutions_completed,
                "deferred": e.evolutions_deferred,
                "issues": e.issue_classes,
                "impl_loc": e.implementation_loc,
                "test_loc": e.test_loc,
            }
            for e in trajectory
        ]
        _write_file(
            out / f"evolution-history.{ext}",
            json_lib.dumps(traj_data, indent=2),
        )
    else:
        _write_file(
            out / f"evolution-history.{ext}",
            _format_markdown_evolution(trajectory),
        )

    # 6. Error landscape: aggregate execution statistics.
    exec_stats = store.get_execution_stats()
    if fmt == "json":
        _write_file(
            out / f"error-landscape.{ext}",
            json_lib.dumps(exec_stats, indent=2),
        )
    else:
        _write_file(
            out / f"error-landscape.{ext}", _format_markdown_errors(exec_stats)
        )

    # Summary of what was written.
    console.print(f"[green]Exported learning data to {out}/[/green]")
    console.print(f"  Semantic insights: {len(insights)} patterns")
    console.print(
        f"  Drift alerts: {len(eff_drift)} effectiveness, "
        f"{len(epi_drift)} epistemic"
    )
    console.print(f"  Patterns total: {len(all_patterns)}")
    console.print(f"  Evolution history: {len(trajectory)} cycles")

learning_record_evolution

learning_record_evolution(cycle=..., evolutions_completed=..., issue_classes=..., implementation_loc=..., test_loc=..., evolutions_deferred=0, cv_avg=0.0, loc_accuracy=1.0, notes='')

Record an evolution cycle in the trajectory table.

Examples:

mzt learning-record-evolution --cycle 26 \
    --evolutions-completed 2 \
    --issue-classes "infrastructure_activation,testing_depth" \
    --implementation-loc 150 --test-loc 200

Source code in src/marianne/cli/commands/learning/_export.py
def learning_record_evolution(
    cycle: Annotated[
        int, typer.Option("--cycle", help="Evolution cycle number")
    ] = ...,  # type: ignore[assignment]
    evolutions_completed: Annotated[
        int,
        typer.Option(
            "--evolutions-completed", help="Number of evolutions completed"
        ),
    ] = ...,  # type: ignore[assignment]
    issue_classes: Annotated[
        str,
        typer.Option("--issue-classes", help="Comma-separated issue classes"),
    ] = ...,  # type: ignore[assignment]
    implementation_loc: Annotated[
        int,
        typer.Option(
            "--implementation-loc", help="Lines of implementation code"
        ),
    ] = ...,  # type: ignore[assignment]
    test_loc: Annotated[
        int, typer.Option("--test-loc", help="Lines of test code")
    ] = ...,  # type: ignore[assignment]
    evolutions_deferred: Annotated[
        int, typer.Option("--evolutions-deferred", help="Number deferred")
    ] = 0,
    cv_avg: Annotated[
        float, typer.Option("--cv-avg", help="Average consciousness volume")
    ] = 0.0,
    loc_accuracy: Annotated[
        float,
        typer.Option("--loc-accuracy", help="LOC estimation accuracy (0-2)"),
    ] = 1.0,
    notes: Annotated[
        str, typer.Option("--notes", help="Optional notes")
    ] = "",
) -> None:
    """Record an evolution cycle in the trajectory table.

    Persists one trajectory entry to the global learning store and prints
    a short confirmation. The ``...`` defaults mark those typer options as
    required on the command line.

    Examples:
        mzt learning-record-evolution --cycle 26 \\
            --evolutions-completed 2 \\
            --issue-classes "infrastructure_activation,testing_depth" \\
            --implementation-loc 150 --test-loc 200
    """
    # Imported lazily so CLI startup does not load the learning stack.
    from marianne.learning.global_store import get_global_store

    # Normalise the comma-separated CLI value into a clean list of
    # non-empty issue-class names.
    classes = [
        token
        for token in (piece.strip() for piece in issue_classes.split(","))
        if token
    ]

    entry_id = get_global_store().record_evolution_entry(
        cycle=cycle,
        evolutions_completed=evolutions_completed,
        evolutions_deferred=evolutions_deferred,
        issue_classes=classes,
        cv_avg=cv_avg,
        implementation_loc=implementation_loc,
        test_loc=test_loc,
        loc_accuracy=loc_accuracy,
        notes=notes,
    )

    console.print(f"[green]Recorded evolution cycle {cycle}[/green]")
    console.print(f"  Entry ID: {entry_id}")
    console.print(f"  Evolutions completed: {evolutions_completed}")
    console.print(f"  Issue classes: {', '.join(classes)}")

patterns_list

patterns_list(global_patterns=Option(True, '--global/--local', '-g/-l', help='Show global patterns (default) or local workspace patterns'), min_priority=Option(0.0, '--min-priority', '-p', help='Minimum priority score to display (0.0-1.0)'), limit=Option(20, '--limit', '-n', help='Maximum number of patterns to display'), json_output=Option(False, '--json', '-j', help='Output as JSON for machine parsing'), quarantined=Option(False, '--quarantined', '-q', help='Show only quarantined patterns'), high_trust=Option(False, '--high-trust', help='Show only patterns with trust >= 0.7'), low_trust=Option(False, '--low-trust', help='Show only patterns with trust <= 0.3'))

View global learning patterns.

Displays patterns learned from job executions across all workspaces.

Examples:

mzt patterns-list                     # Show global patterns
mzt patterns-list --min-priority 0.5  # Only high-priority patterns
mzt patterns-list --json              # JSON output for scripting
mzt patterns-list --quarantined       # Show quarantined patterns
mzt patterns-list --high-trust        # Show trusted patterns only

Source code in src/marianne/cli/commands/learning/_patterns.py
def patterns_list(
    global_patterns: bool = typer.Option(
        True,
        "--global/--local",
        "-g/-l",
        help="Show global patterns (default) or local workspace patterns",
    ),
    min_priority: float = typer.Option(
        0.0,
        "--min-priority",
        "-p",
        help="Minimum priority score to display (0.0-1.0)",
    ),
    limit: int = typer.Option(
        20,
        "--limit",
        "-n",
        help="Maximum number of patterns to display",
    ),
    json_output: bool = typer.Option(
        False,
        "--json",
        "-j",
        help="Output as JSON for machine parsing",
    ),
    quarantined: bool = typer.Option(
        False,
        "--quarantined",
        "-q",
        help="Show only quarantined patterns",
    ),
    high_trust: bool = typer.Option(
        False,
        "--high-trust",
        help="Show only patterns with trust >= 0.7",
    ),
    low_trust: bool = typer.Option(
        False,
        "--low-trust",
        help="Show only patterns with trust <= 0.3",
    ),
) -> None:
    """View global learning patterns.

    Displays patterns learned from job executions across all workspaces.

    Examples:
        mzt patterns-list                  # Show global patterns
        mzt patterns-list --min-priority 0.5  # Only high-priority patterns
        mzt patterns-list --json           # JSON output for scripting
        mzt patterns-list --quarantined    # Show quarantined patterns
        mzt patterns-list --high-trust     # Show trusted patterns only
    """
    # Imported lazily so CLI startup does not load the learning stack.
    from marianne.learning.global_store import QuarantineStatus, get_global_store

    # NOTE(review): the --global/--local flag (global_patterns) is not used
    # in this body — local mode currently behaves like global; confirm intent.

    store = get_global_store()

    # Translate CLI flags into store query filters.
    filter_kwargs: dict[str, Any] = {
        "min_priority": min_priority,
        "limit": limit,
    }
    if quarantined:
        filter_kwargs["quarantine_status"] = QuarantineStatus.QUARANTINED
    if high_trust:
        filter_kwargs["min_trust"] = 0.7
    if low_trust:
        filter_kwargs["max_trust"] = 0.3

    # Query once; both output modes render the same result set
    # (previously the identical query was issued in each branch).
    pattern_list_result = store.get_patterns(**filter_kwargs)

    if json_output:
        output = [
            {
                "id": p.id,
                "pattern_type": p.pattern_type,
                "pattern_name": p.pattern_name,
                "description": p.description,
                "occurrence_count": p.occurrence_count,
                "effectiveness_score": round(p.effectiveness_score, 3),
                "priority_score": round(p.priority_score, 3),
                "context_tags": list(p.context_tags) if p.context_tags else [],
                "quarantine_status": p.quarantine_status.value,
                "trust_score": round(p.trust_score, 3),
            }
            for p in pattern_list_result
        ]
        console.print(json_lib.dumps(output, indent=2))
        return

    if not pattern_list_result:
        console.print("[dim]No patterns found in global learning store.[/dim]")
        console.print(
            "\n[dim]Hint: Patterns are learned from job executions. "
            "Run jobs with learning enabled to build patterns.[/dim]"
        )
        return

    table = Table(title="Global Learning Patterns")
    table.add_column("ID", style="cyan", no_wrap=True, width=10)
    table.add_column("Type", style="yellow", width=12)
    table.add_column("Name", style="bold", width=20)
    table.add_column("Status", width=10)
    table.add_column("Trust", justify="right", width=6)
    table.add_column("Auto", justify="center", width=4)
    table.add_column("Count", justify="right", width=5)
    table.add_column("Effect", justify="right", width=6)
    table.add_column("Prior", justify="right", width=5)

    for p in pattern_list_result:
        # Auto-apply eligibility requires high trust AND validated status.
        is_auto_eligible = (
            p.trust_score >= 0.85
            and p.quarantine_status.value == "validated"
        )
        auto_str = "[green]⚡[/green]" if is_auto_eligible else ""

        # Effectiveness traffic light: >0.7 green, >0.4 yellow, else red.
        eff = p.effectiveness_score
        eff_color = "green" if eff > 0.7 else "yellow" if eff > 0.4 else "red"
        eff_str = f"[{eff_color}]{eff:.2f}[/{eff_color}]"

        pri = p.priority_score
        pri_color = "green" if pri > 0.7 else "yellow" if pri > 0.4 else "dim"
        pri_str = f"[{pri_color}]{pri:.2f}[/{pri_color}]"

        # Quarantine status coloring; unknown statuses render dim.
        status = p.quarantine_status.value
        status_colors = {
            "pending": "dim",
            "quarantined": "red",
            "validated": "green",
            "retired": "dim italic",
        }
        status_color = status_colors.get(status, "dim")
        status_str = f"[{status_color}]{status}[/{status_color}]"

        trust = p.trust_score
        trust_color = "green" if trust >= 0.7 else "yellow" if trust >= 0.4 else "red"
        trust_str = f"[{trust_color}]{trust:.2f}[/{trust_color}]"

        # Truncate wide fields to their column widths.
        table.add_row(
            p.id[:10],
            p.pattern_type[:12],
            p.pattern_name[:20],
            status_str,
            trust_str,
            auto_str,
            str(p.occurrence_count),
            eff_str,
            pri_str,
        )

    console.print(table)
    console.print(f"\n[dim]Showing {len(pattern_list_result)} pattern(s)[/dim]")

patterns_why

patterns_why(pattern_id=Argument(None, help="Pattern ID to analyze (first 10 chars from 'patterns' command). If omitted, shows all patterns with captured success factors."), min_observations=Option(1, '--min-obs', '-m', help='Minimum success factor observations required'), limit=Option(10, '--limit', '-n', help='Maximum number of patterns to display'), json_output=Option(False, '--json', '-j', help='Output as JSON for machine parsing'))

Analyze WHY patterns succeed with metacognitive insights.

Shows success factors - the context conditions that contribute to pattern effectiveness. This helps understand CAUSALITY behind patterns, not just correlation.

v22 Evolution: Metacognitive Pattern Reflection

Examples:

mzt patterns-why              # Show all patterns with WHY analysis
mzt patterns-why abc123       # Analyze specific pattern
mzt patterns-why --min-obs 3  # Only patterns with 3+ observations
mzt patterns-why --json       # JSON output for scripting

Source code in src/marianne/cli/commands/learning/_patterns.py
def patterns_why(
    pattern_id: str = typer.Argument(
        None,
        help="Pattern ID to analyze (first 10 chars from 'patterns' command). "
        "If omitted, shows all patterns with captured success factors.",
    ),
    min_observations: int = typer.Option(
        1,
        "--min-obs",
        "-m",
        help="Minimum success factor observations required",
    ),
    limit: int = typer.Option(
        10,
        "--limit",
        "-n",
        help="Maximum number of patterns to display",
    ),
    json_output: bool = typer.Option(
        False,
        "--json",
        "-j",
        help="Output as JSON for machine parsing",
    ),
) -> None:
    """Analyze WHY patterns succeed with metacognitive insights.

    Shows success factors - the context conditions that contribute to
    pattern effectiveness. This helps understand CAUSALITY behind patterns,
    not just correlation.

    v22 Evolution: Metacognitive Pattern Reflection

    Examples:
        mzt patterns-why              # Show all patterns with WHY analysis
        mzt patterns-why abc123       # Analyze specific pattern
        mzt patterns-why --min-obs 3  # Only patterns with 3+ observations
        mzt patterns-why --json       # JSON output for scripting
    """
    # Imported lazily so CLI startup does not load the learning stack.
    from marianne.learning.global_store import get_global_store

    store = get_global_store()

    # Single-pattern mode: detailed analysis for one ID prefix.
    if pattern_id:
        # Prefix lookup is done client-side over up to 1000 patterns.
        patterns_list = store.get_patterns(min_priority=0.0, limit=1000)
        matching = [p for p in patterns_list if p.id.startswith(pattern_id)]

        if not matching:
            output_error(
                f"No pattern found with ID starting with '{pattern_id}'",
                hints=["Run 'mzt patterns-list' to see available patterns."],
            )
            raise typer.Exit(1)

        # Ambiguous prefix: show up to 5 candidates and abort.
        if len(matching) > 1:
            match_lines = [f"  {p.id} - {p.pattern_name}" for p in matching[:5]]
            output_error(
                f"Multiple patterns match '{pattern_id}':\n" + "\n".join(match_lines),
                severity="warning",
                hints=["Provide more characters to uniquely identify the pattern."],
            )
            raise typer.Exit(1)

        pattern = matching[0]
        analysis = store.analyze_pattern_why(pattern.id)

        if json_output:
            # default=str stringifies non-JSON types (e.g. datetimes).
            console.print(json_lib.dumps(analysis, indent=2, default=str))
            return

        console.print(Panel(
            f"[bold]{analysis.get('pattern_name', 'Unknown')}[/bold]\n"
            f"[dim]Type: {analysis.get('pattern_type', 'unknown')}[/dim]",
            title="WHY Analysis",
            border_style="magenta",
        ))

        # Nothing captured yet: explain how factors get recorded and stop.
        if not analysis.get("has_factors"):
            console.print("\n[yellow]No success factors captured yet.[/yellow]")
            console.print(
                "[dim]Apply this pattern to successful executions "
                "to capture WHY it works.[/dim]"
            )
            return

        console.print("\n[bold]Factors Summary[/bold]")
        console.print(f"  {analysis.get('factors_summary', 'No summary')}")

        key_conditions = analysis.get("key_conditions", [])
        if key_conditions:
            console.print("\n[bold]Key Conditions[/bold]")
            for cond in key_conditions:
                console.print(f"  • {cond}")

        # Borderless two-column layout for the metric values.
        console.print("\n[bold]Metrics[/bold]")
        table = Table(show_header=False, box=None)
        table.add_column("Field", style="dim", width=20)
        table.add_column("Value", style="bold")

        table.add_row("Observations", str(analysis.get("observation_count", 0)))
        table.add_row("Success Rate", f"{analysis.get('success_rate', 0):.1%}")
        table.add_row("Confidence", f"{analysis.get('confidence', 0):.2f}")
        table.add_row("Trust Score", f"{analysis.get('trust_score', 0):.2f}")
        table.add_row("Effectiveness", f"{analysis.get('effectiveness_score', 0):.2f}")

        console.print(table)

        recommendations = analysis.get("recommendations", [])
        if recommendations:
            console.print("\n[bold]Recommendations[/bold]")
            for rec in recommendations:
                console.print(f"  → {rec}")
    # Overview mode: compact table of every pattern with captured factors.
    else:
        results = store.get_patterns_with_why(
            min_observations=min_observations,
            limit=limit,
        )

        if json_output:
            output = [
                {"pattern_id": p.id, "pattern_name": p.pattern_name, "analysis": a}
                for p, a in results
            ]
            console.print(json_lib.dumps(output, indent=2, default=str))
            return

        if not results:
            console.print(
                "[yellow]No patterns with captured success factors found.[/yellow]"
            )
            console.print(
                "[dim]Success factors are captured when patterns lead to "
                "successful executions.[/dim]"
            )
            return

        console.print(
            f"[bold]Patterns with WHY Analysis[/bold] "
            f"[dim]({len(results)} patterns with ≥{min_observations} observations)[/dim]\n"
        )

        table = Table(show_header=True, box=None)
        table.add_column("Pattern", style="cyan", width=30)
        table.add_column("Obs", justify="right", width=5)
        table.add_column("Success%", justify="right", width=8)
        table.add_column("Confidence", justify="right", width=10)
        table.add_column("Key Insight", width=40)

        for pattern, analysis in results:
            # First key condition wins; fall back to the factors summary.
            # NOTE(review): the .get default "" only covers a missing key —
            # if "factors_summary" is present but None, len(insight) would
            # raise TypeError; confirm the analysis dict never carries None.
            key_conditions = analysis.get("key_conditions", [])
            insight = key_conditions[0] if key_conditions else analysis.get("factors_summary", "")
            if len(insight) > 38:
                insight = insight[:35] + "..."

            obs = analysis.get("observation_count", 0)
            success_rate = analysis.get("success_rate", 0)
            confidence = analysis.get("confidence", 0)

            # Truncate long names to fit the 30-char column.
            name = pattern.pattern_name
            display_name = f"{name[:28]}..." if len(name) > 30 else name
            table.add_row(
                display_name,
                str(obs),
                f"{success_rate:.0%}",
                f"{confidence:.2f}",
                insight,
            )

        console.print(table)
        console.print(
            "\n[dim]Use 'mzt patterns-why <id>' for detailed analysis.[/dim]"
        )

learning_activity

learning_activity(hours=Option(24, '--hours', '-h', help='Show activity from the last N hours'), json_output=Option(False, '--json', '-j', help='Output as JSON for machine parsing'))

View recent learning activity and pattern applications.

Examples:

mzt learning-activity           # Last 24 hours of activity
mzt learning-activity -h 48     # Last 48 hours
mzt learning-activity --json    # JSON output

Source code in src/marianne/cli/commands/learning/_stats.py
def learning_activity(
    hours: int = typer.Option(
        24,
        "--hours",
        "-h",
        help="Show activity from the last N hours",
    ),
    json_output: bool = typer.Option(
        False,
        "--json",
        "-j",
        help="Output as JSON for machine parsing",
    ),
) -> None:
    """View recent learning activity and pattern applications.

    Examples:
        mzt learning-activity           # Last 24 hours of activity
        mzt learning-activity -h 48     # Last 48 hours
        mzt learning-activity --json    # JSON output
    """
    from marianne.learning.global_store import get_global_store

    store = get_global_store()
    stats = store.get_execution_stats()
    exec_window = store.get_optimal_execution_window()

    # Count the sampled executions that finished inside the requested window.
    # NOTE(review): the sample is capped at 20 recent executions, so the count
    # is a lower bound for busy periods — confirm against store semantics.
    window_start = datetime.now() - timedelta(hours=hours)
    sampled = store.get_similar_executions(limit=20)
    in_window = len(
        [e for e in sampled if e.completed_at and e.completed_at > window_start]
    )

    if json_output:
        # Machine-readable summary mirroring the human-readable sections below.
        payload: dict[str, Any] = {
            "period_hours": hours,
            "recent_executions": in_window,
            "success_without_retry_rate": round(
                stats.get("success_without_retry_rate", 0) * 100, 1
            ),
            "patterns_active": stats.get("total_patterns", 0),
            "optimal_hours": exec_window.get("optimal_hours", []),
            "avoid_hours": exec_window.get("avoid_hours", []),
            "scheduling_confidence": round(exec_window.get("confidence", 0), 2),
        }
        console.print(json_lib.dumps(payload, indent=2))
        return

    console.print(f"[bold]Learning Activity (last {hours} hours)[/bold]\n")

    console.print("[bold cyan]Recent Executions[/bold cyan]")
    console.print(f"  Executions in period: [green]{in_window}[/green]")
    first_attempt_pct = stats.get("success_without_retry_rate", 0) * 100
    rate_color = "green" if first_attempt_pct > 70 else "yellow"
    console.print(
        f"  First-attempt success: [{rate_color}]{first_attempt_pct:.1f}%[/]"
    )

    console.print("\n[bold cyan]Pattern Application[/bold cyan]")
    active_patterns = stats.get("total_patterns", 0)
    if active_patterns > 0:
        console.print(f"  Active patterns: [yellow]{active_patterns}[/yellow]")
        effectiveness = stats.get("avg_pattern_effectiveness", 0)
        eff_color = "green" if effectiveness > 0.6 else "yellow"
        console.print(f"  Avg effectiveness: [{eff_color}]{effectiveness:.2f}[/]")
    else:
        console.print("  [dim]No patterns learned yet[/dim]")

    console.print("\n[bold cyan]Optimal Execution Windows[/bold cyan]")
    # Only surface scheduling advice once confidence clears the 0.3 threshold.
    if exec_window.get("confidence", 0) > 0.3:
        best_hours = exec_window.get("optimal_hours", [])
        worst_hours = exec_window.get("avoid_hours", [])

        if best_hours:
            label = ", ".join(f"{h:02d}:00" for h in best_hours)
            console.print(f"  [green]✓ Best hours:[/green] {label}")
        if worst_hours:
            label = ", ".join(f"{h:02d}:00" for h in worst_hours)
            console.print(f"  [red]✗ Avoid hours:[/red] {label}")

        console.print(
            f"  Confidence: [cyan]{exec_window.get('confidence', 0):.0%}[/cyan] "
            f"(based on {exec_window.get('sample_count', 0)} samples)"
        )
    else:
        console.print("  [dim]Insufficient data for scheduling recommendations[/dim]")

    console.print("\n[bold cyan]Learning Status[/bold cyan]")
    lifetime_total = stats.get("total_executions", 0)
    # Training tiers: 50+ executions = well-trained, 10+ = gathering, else early.
    if lifetime_total >= 50:
        console.print("  [green]✓ Learning system is well-trained[/green]")
    elif lifetime_total >= 10:
        console.print("  [yellow]○ Learning system is gathering data[/yellow]")
    else:
        console.print("  [dim]○ Learning system is in early training[/dim]")

learning_insights

learning_insights(limit=10, pattern_type=None)

Show actionable insights from learning data.

Displays patterns extracted from execution history including:

- Output patterns (from stdout/stderr analysis)
- Error code patterns (aggregated error statistics)
- Success predictors (factors that correlate with success)

Examples:

mzt learning-insights
mzt learning-insights --pattern-type output_pattern
mzt learning-insights --limit 20

Source code in src/marianne/cli/commands/learning/_stats.py
def learning_insights(
    limit: Annotated[int, typer.Option(help="Max patterns to show")] = 10,
    pattern_type: Annotated[str | None, typer.Option(help="Filter by type")] = None,
) -> None:
    """Show actionable insights from learning data.

    Displays patterns extracted from execution history including:
    - Output patterns (from stdout/stderr analysis)
    - Error code patterns (aggregated error statistics)
    - Success predictors (factors that correlate with success)

    Examples:
        mzt learning-insights
        mzt learning-insights --pattern-type output_pattern
        mzt learning-insights --limit 20
    """
    from marianne.learning.global_store import GlobalLearningStore

    console.print("[bold]Learning Insights[/bold]")
    console.print()

    learned = GlobalLearningStore().get_patterns(
        pattern_type=pattern_type,
        limit=limit,
    )
    if not learned:
        console.print("[dim]No patterns learned yet. Run some jobs![/dim]")
        return

    table = Table(title="Learned Patterns")
    table.add_column("Type", style="cyan")
    table.add_column("Description")
    table.add_column("Freq", justify="right")
    table.add_column("Effectiveness", justify="right")

    for entry in learned:
        raw_desc = entry.description or ""
        # Truncate long descriptions so the table stays readable.
        shown = raw_desc if len(raw_desc) <= 45 else raw_desc[:45] + "..."
        score = entry.effectiveness_score
        table.add_row(
            entry.pattern_type,
            shown or "[no description]",
            str(entry.occurrence_count),
            f"{score:.0%}" if score else "-",
        )

    console.print(table)

learning_stats

learning_stats(json_output=Option(False, '--json', '-j', help='Output as JSON for machine parsing'))

View global learning statistics.

Shows summary statistics about the global learning store including execution counts, pattern counts, and effectiveness metrics.

Examples:

mzt learning-stats         # Human-readable summary
mzt learning-stats --json  # JSON output for scripting

Source code in src/marianne/cli/commands/learning/_stats.py
def learning_stats(
    json_output: bool = typer.Option(
        False,
        "--json",
        "-j",
        help="Output as JSON for machine parsing",
    ),
) -> None:
    """View global learning statistics.

    Shows summary statistics about the global learning store including
    execution counts, pattern counts, and effectiveness metrics.

    Examples:
        mzt learning-stats         # Human-readable summary
        mzt learning-stats --json  # JSON output for scripting
    """
    from marianne.learning.global_store import get_global_store
    from marianne.learning.migration import check_migration_status

    store = get_global_store()
    summary = store.get_execution_stats()
    migration_info = check_migration_status(store)

    if json_output:
        # Machine-readable form: percentages rounded to one decimal place,
        # effectiveness to three.
        payload = {
            "executions": {
                "total": summary.get("total_executions", 0),
                "success_without_retry_rate": round(
                    summary.get("success_without_retry_rate", 0) * 100, 1
                ),
            },
            "patterns": {
                "total": summary.get("total_patterns", 0),
                "avg_effectiveness": round(summary.get("avg_pattern_effectiveness", 0), 3),
            },
            "workspaces": {
                "unique": summary.get("unique_workspaces", 0),
            },
            "error_recoveries": {
                "total": summary.get("total_error_recoveries", 0),
                "success_rate": round(summary.get("error_recovery_success_rate", 0) * 100, 1),
            },
            "migration_needed": migration_info.get("needs_migration", False),
        }
        console.print(json_lib.dumps(payload, indent=2))
        return

    console.print("[bold]Global Learning Statistics[/bold]\n")

    console.print("[bold cyan]Executions[/bold cyan]")
    console.print(f"  Total recorded: [green]{summary.get('total_executions', 0)}[/green]")
    first_attempt_pct = summary.get("success_without_retry_rate", 0) * 100
    rate_color = "green" if first_attempt_pct > 70 else "yellow"
    console.print(f"  First-attempt success: [{rate_color}]{first_attempt_pct:.1f}%[/]")

    console.print("\n[bold cyan]Patterns[/bold cyan]")
    console.print(f"  Total learned: [yellow]{summary.get('total_patterns', 0)}[/yellow]")
    effectiveness = summary.get("avg_pattern_effectiveness", 0)
    eff_color = "green" if effectiveness > 0.6 else "yellow"
    console.print(f"  Avg effectiveness: [{eff_color}]{effectiveness:.2f}[/]")

    # Tally pattern provenance; categories are independent (a pattern may
    # count toward more than one).
    by_output = by_error_code = by_semantic = 0
    for p in store.get_patterns(limit=1000):
        if p.pattern_type == "output_pattern":
            by_output += 1
        if "error_code" in (p.pattern_name or ""):
            by_error_code += 1
        if p.pattern_type == "semantic_failure":
            by_semantic += 1

    console.print("\n[bold cyan]Data Sources[/bold cyan]")
    console.print(f"  Output patterns extracted: [cyan]{by_output}[/cyan]")
    console.print(f"  Error code patterns: [cyan]{by_error_code}[/cyan]")
    console.print(f"  Semantic failure patterns: [cyan]{by_semantic}[/cyan]")

    console.print("\n[bold cyan]Workspaces[/bold cyan]")
    console.print(f"  Unique workspaces: [cyan]{summary.get('unique_workspaces', 0)}[/cyan]")

    console.print("\n[bold cyan]Error Recovery Learning[/bold cyan]")
    console.print(f"  Recoveries recorded: {summary.get('total_error_recoveries', 0)}")
    recovery_pct = summary.get("error_recovery_success_rate", 0) * 100
    rec_color = "green" if recovery_pct > 70 else "yellow"
    console.print(f"  Recovery success rate: [{rec_color}]{recovery_pct:.1f}%[/]")

    if migration_info.get("needs_migration"):
        console.print(
            "\n[yellow]⚠ Migration needed:[/yellow] Run 'marianne aggregate-patterns' "
            "to import workspace-local outcomes"
        )