
Running entire studies

Tools for running studies that consist of multiple experiments.

apebench.run_study

run_study(
    configs: list[dict],
    base_path: str,
    *,
    overwrite: bool = False
) -> tuple[list[pathlib.Path], list[pathlib.Path]]

Execute a study with multiple experiments.

By default skips experiments that have already been conducted.

Arguments:

  • configs: A list of dictionaries, each containing the keyword arguments for apebench.run_experiment.
  • base_path: The base path to store the results in.
  • overwrite: Whether to overwrite existing results.

Returns:

  • raw_file_list: A list of paths to the raw data files.
  • network_weights_list: A list of paths to the network weights.
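For example, a minimal sketch of a small study (the config keys shown are typical keyword arguments of apebench.run_experiment; the scenario identifier and network strings are illustrative, not the only valid values):

import apebench

configs = [
    {
        "scenario": "diff_adv",  # illustrative scenario identifier
        "task": "predict",
        "net": net,
        "train": "one",
        "start_seed": 0,
        "num_seeds": 3,
    }
    # compare a convolutional and a spectral architecture
    for net in ["Conv;26;10;relu", "FNO;12;8;4;gelu"]
]

raw_file_list, network_weights_list = apebench.run_study(
    configs,
    "my_study",  # results are stored below this base path
)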
Source code in apebench/_run.py
def run_study(
    configs: list[dict],
    base_path: str,
    *,
    overwrite: bool = False,
) -> tuple[list[pathlib.Path], list[pathlib.Path]]:
    """
    Execute a study with multiple experiments.

    By default skips experiments that have already been conducted.

    **Arguments:**

    * `configs`: A list of dictionaries, each containing the
        keyword arguments for [`apebench.run_experiment`][].
    * `base_path`: The base path to store the results in.
    * `overwrite`: Whether to overwrite existing results.

    **Returns:**

    * `raw_file_list`: A list of paths to the raw data files.
    * `network_weights_list`: A list of paths to the
        network weights.
    """
    # Accept both str and Path inputs: the `/` joins below require a Path.
    base_path = pathlib.Path(base_path)

    raw_file_list = []
    network_weights_list = []

    for config in configs:
        experiment_name = get_experiment_name(**config)

        print("Considering")
        print(experiment_name)

        raw_data_folder = base_path / pathlib.Path("raw")
        os.makedirs(raw_data_folder, exist_ok=True)
        raw_data_path = raw_data_folder / pathlib.Path(f"{experiment_name}.csv")

        network_weights_folder = base_path / pathlib.Path("network_weights")
        os.makedirs(network_weights_folder, exist_ok=True)
        network_weights_path = network_weights_folder / pathlib.Path(
            f"{experiment_name}.eqx"
        )

        raw_file_list.append(raw_data_path)
        network_weights_list.append(network_weights_path)

        if (
            os.path.exists(raw_data_path)
            and os.path.exists(network_weights_path)
            and not overwrite
        ):
            print("Skipping, already trained ...")
            print()
            continue

        data, trained_neural_stepper_s = run_experiment(**config)

        data.to_csv(raw_data_path)
        eqx.tree_serialise_leaves(
            network_weights_path,
            trained_neural_stepper_s,
        )

        del data
        del trained_neural_stepper_s

        print("Finished training!")
        print()

    return raw_file_list, network_weights_list
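Per the source above, each experiment writes two artifacts beneath base_path, named after its experiment name: the raw metric data as CSV and the trained network weights serialized with Equinox:

base_path/
├── raw/
│   └── {experiment_name}.csv
└── network_weights/
    └── {experiment_name}.eqx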

apebench.run_study_convenience

run_study_convenience(
    configs: list[dict],
    base_path: Optional[str] = None,
    *,
    overwrite: bool = False,
    metric_name: Union[str, list[str]] = "mean_nRMSE",
    do_metrics: bool = True,
    do_loss: bool = False,
    do_sample_rollouts: bool = False,
    parse_kwargs: bool = True
) -> tuple[
    pd.DataFrame,
    pd.DataFrame,
    pd.DataFrame,
    list[pathlib.Path],
]

Run a study with multiple experiments, then melt and concatenate the results.

Arguments:

  • configs: A list of dictionaries, each containing the keyword arguments for run_experiment.
  • base_path: The base path to store the results in. If None, a path of the form _results_<hash> is generated from the hash of the configs. Note that Python's string hashing is randomized per interpreter session, so pass an explicit base_path if results should be reused across runs.
  • overwrite: Whether to overwrite existing results.
  • metric_name: The name of the metric to melt.
  • do_metrics: Whether to melt and save the metrics.
  • do_loss: Whether to melt and save the loss.
  • do_sample_rollouts: Whether to melt and save the sample rollouts.
  • parse_kwargs: Whether to parse the scenario kwargs.

Returns:

  • metric_df: The DataFrame containing the metrics.
  • loss_df: The DataFrame containing the loss.
  • sample_rollout_df: The DataFrame containing the sample rollouts.
  • network_weights_list: A list of paths to the network weights.
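A usage sketch, reusing the configs list from the run_study example above (do_loss=True additionally melts the training-loss histories):

import apebench

metric_df, loss_df, sample_rollout_df, network_weights_list = (
    apebench.run_study_convenience(
        configs,
        "my_study",
        do_loss=True,
    )
)

# metric_df holds the melted mean_nRMSE rollout metrics across all
# experiments; loss_df holds the melted training losses.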
Source code in apebench/_run.py
def run_study_convenience(
    configs: list[dict],
    base_path: Optional[str] = None,
    *,
    overwrite: bool = False,
    metric_name: Union[str, list[str]] = "mean_nRMSE",
    do_metrics: bool = True,
    do_loss: bool = False,
    do_sample_rollouts: bool = False,
    parse_kwargs: bool = True,
) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, list[pathlib.Path]]:
    """
    Run a study with multiple experiments, then melt and concatenate the results.

    **Arguments:**

    * `configs`: A list of dictionaries, each containing the
        keyword arguments for `run_experiment`.
    * `base_path`: The base path to store the results in. If
        `None`, a path is generated based on the hash of the `configs`.
    * `overwrite`: Whether to overwrite existing results.
    * `metric_name`: The name of the metric to melt.
    * `do_metrics`: Whether to melt and save the metrics.
    * `do_loss`: Whether to melt and save the loss.
    * `do_sample_rollouts`: Whether to melt and save the sample rollouts.
    * `parse_kwargs`: Whether to parse the scenario kwargs.

    **Returns:**

    * `metric_df`: The DataFrame containing the metrics.
    * `loss_df`: The DataFrame containing the loss.
    * `sample_rollout_df`: The DataFrame containing the sample rollouts.
    * `network_weights_list`: A list of paths to the network weights.
    """
    if base_path is None:
        config_hash = hash(str(configs))
        base_path = pathlib.Path(f"_results_{config_hash}")

    raw_file_list, network_weights_list = run_study(
        configs,
        base_path,
        overwrite=overwrite,
    )

    melt_concat_from_list(
        raw_file_list,
        base_path,
        metric_name=metric_name,
        do_metrics=do_metrics,
        do_loss=do_loss,
        do_sample_rollouts=do_sample_rollouts,
    )

    if do_metrics:
        metric_df = pd.read_csv(base_path / pathlib.Path("metrics.csv"))
        if parse_kwargs:
            metric_df = read_in_kwargs(metric_df)
    else:
        metric_df = pd.DataFrame()

    if do_loss:
        loss_df = pd.read_csv(base_path / pathlib.Path("train_loss.csv"))
        if parse_kwargs:
            loss_df = read_in_kwargs(loss_df)
    else:
        loss_df = pd.DataFrame()

    if do_sample_rollouts:
        sample_rollout_df = pd.read_csv(base_path / pathlib.Path("sample_rollout.csv"))
        if parse_kwargs:
            sample_rollout_df = read_in_kwargs(sample_rollout_df)
    else:
        sample_rollout_df = pd.DataFrame()

    return metric_df, loss_df, sample_rollout_df, network_weights_list
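Because the melted frames are persisted as CSVs under base_path (metrics.csv, train_loss.csv, sample_rollout.csv, as read back above), a study run with an explicit base_path can be reloaded later without re-invoking the runner; a sketch:

import pathlib

import pandas as pd

base_path = pathlib.Path("my_study")
metric_df = pd.read_csv(base_path / "metrics.csv")

# The exact melted column layout is produced by melt_concat_from_list;
# inspect it before aggregating.
print(metric_df.columns)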