Skip to content

IO

Config Loading

policy_arena.io.config_loader

Load scenario configuration from YAML and build Scenario objects.

load_config(path)

Load and validate a YAML config file.

Source code in src/policy_arena/io/config_loader.py
77
78
79
80
81
82
83
def load_config(path: str | Path) -> ScenarioConfig:
    """Load and validate a YAML config file.

    Args:
        path: Path to a YAML scenario configuration file.

    Returns:
        A validated ``ScenarioConfig`` built from the file contents.

    Raises:
        FileNotFoundError: If *path* does not exist.
        ValueError: If the file is empty or its top level is not a mapping.
    """
    path = Path(path)
    with open(path) as f:
        raw = yaml.safe_load(f)

    # safe_load returns None for an empty file and a scalar/list for
    # non-mapping documents; fail with a clear message instead of the
    # opaque TypeError that ScenarioConfig(**raw) would otherwise raise.
    if not isinstance(raw, dict):
        raise ValueError(f"Config file {path} must contain a YAML mapping")

    return ScenarioConfig(**raw)

load_scenario(path)

Load a YAML config and build a ready-to-run Scenario.

Source code in src/policy_arena/io/config_loader.py
86
87
88
89
def load_scenario(path: str | Path) -> Scenario:
    """Load a YAML config and build a ready-to-run Scenario."""
    # Convenience wrapper: parse/validate, then assemble in one call.
    return build_scenario(load_config(path))

build_scenario(config)

Build a Scenario from a validated ScenarioConfig.

Source code in src/policy_arena/io/config_loader.py
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
def build_scenario(config: ScenarioConfig) -> Scenario:
    """Build a Scenario from a validated ScenarioConfig.

    Expands each agent entry into ``count`` brains, offsetting any
    per-agent seed by the replica index so replicas differ, and labels
    replicas ``name_0 .. name_{count-1}`` (a lone agent keeps its bare name).
    """
    brains: list[Brain] = []
    labels: list[str] = []

    for agent_cfg in config.agents:
        replicated = agent_cfg.count > 1
        for idx in range(agent_cfg.count):
            params = dict(agent_cfg.parameters)
            # Give each replica a distinct, reproducible seed.
            seed = params.get("seed")
            if seed is not None:
                params["seed"] = seed + idx
            cfg_for_replica = agent_cfg.model_copy(update={"parameters": params})
            brains.append(_create_brain(config.game, cfg_for_replica))
            labels.append(f"{agent_cfg.name}_{idx}" if replicated else agent_cfg.name)

    return Scenario(
        world_class=MODEL_CLASSES[config.game],
        world_params=_build_world_params(config, brains, labels),
        steps=config.rounds,
        seed=config.seed,
    )

Schemas

policy_arena.io.schemas

Pydantic schemas for scenario configuration and results.

AgentConfig

Bases: BaseModel

Configuration for a single agent (or group of identical agents).

ScenarioConfig

Bases: BaseModel

Full scenario specification loaded from YAML.

Results Writer

policy_arena.io.results_writer

Write simulation results to Parquet files via polars.

write_results(results, config=None, output_dir=None, run_id=None)

Write simulation results to Parquet files.

Creates a directory structure `{output_dir}/{run_id}/` containing `rounds.parquet` (per-agent per-round data), `metrics.parquet` (model-level metrics per round), and `run_metadata.json` (config snapshot + timing).

Returns the run directory path.

Source code in src/policy_arena/io/results_writer.py
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
def write_results(
    results: RunResults,
    config: ScenarioConfig | None = None,
    output_dir: str | Path | None = None,
    run_id: str | None = None,
) -> Path:
    """Write simulation results to Parquet files.

    Lays out ``{output_dir}/{run_id}/`` with three artifacts:
    ``rounds.parquet`` (per-agent per-round data), ``metrics.parquet``
    (model-level metrics per round), and ``run_metadata.json``
    (config snapshot + timing).

    Returns the run directory path.
    """
    # Falsy run_id/output_dir (None or "") fall back to defaults.
    if not run_id:
        run_id = uuid.uuid4().hex[:12]
    if not output_dir:
        output_dir = "results"

    run_dir = Path(output_dir) / run_id
    run_dir.mkdir(parents=True, exist_ok=True)

    _write_rounds(results, run_id, run_dir)
    _write_metrics(results, run_id, run_dir)
    _write_metadata(config, run_id, run_dir)

    return run_dir

Results Reader

policy_arena.io.results_reader

Read simulation results from Parquet files.

StoredResults(run_id, rounds=None, metrics=None, metadata=dict()) dataclass

Results loaded from disk.

read_results(run_dir)

Read simulation results from a run directory.

Source code in src/policy_arena/io/results_reader.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
def read_results(run_dir: str | Path) -> StoredResults:
    """Read simulation results from a run directory.

    Missing parquet files yield ``None`` fields; a missing metadata
    file yields an empty dict. The run id is the directory name.
    """
    run_dir = Path(run_dir)
    if not run_dir.exists():
        raise FileNotFoundError(f"Run directory not found: {run_dir}")

    def _parquet_or_none(filename: str):
        # Each parquet artifact is optional; absent files become None.
        target = run_dir / filename
        return pl.read_parquet(target) if target.exists() else None

    metadata: dict[str, Any] = {}
    meta_path = run_dir / "run_metadata.json"
    if meta_path.exists():
        with open(meta_path) as fh:
            metadata = json.load(fh)

    return StoredResults(
        run_id=run_dir.name,
        rounds=_parquet_or_none("rounds.parquet"),
        metrics=_parquet_or_none("metrics.parquet"),
        metadata=metadata,
    )

list_runs(results_dir='results')

List all run IDs in the results directory.

Source code in src/policy_arena/io/results_reader.py
56
57
58
59
60
61
62
63
64
65
def list_runs(results_dir: str | Path = "results") -> list[str]:
    """List all run IDs in the results directory."""
    results_dir = Path(results_dir)
    if not results_dir.exists():
        return []
    return sorted(
        d.name
        for d in results_dir.iterdir()
        if d.is_dir() and (d / "run_metadata.json").exists()
    )