Benchmark ergonomics #3

Open · wants to merge 18 commits into main
Conversation

@eddiebergman (Collaborator) commented Jan 10, 2025

This is the script I was implementing that motivated these changes.

from __future__ import annotations

import copy
from collections.abc import Mapping
from pathlib import Path
from typing import TYPE_CHECKING

import numpy as np
from ConfigSpace import ConfigurationSpace

from hpoglue import Config, FunctionalBenchmark, Measure, Optimizer, Problem, Query
from hpoglue.fidelity import ContinuousFidelity, Fidelity, ListFidelity, RangeFidelity

if TYPE_CHECKING:
    from hpoglue import Result


class MyOpt(Optimizer):
    """Random Search Optimizer with random fidelities."""

    name = "RandomSearchRandomFidelity"

    # NOTE(eddiebergman): Random search doesn't directly use any of this
    # information, but we declare support so it can serve as a common baseline.
    support = Problem.Support(
        fidelities=("single",),
        objectives=("single", "many"),
        cost_awareness=(None, "single", "many"),
        tabular=True,  # ask() below also handles tabular (list-of-Configs) spaces.
    )

    mem_req_mb = 1024

    def __init__(
        self,
        *,
        problem: Problem,
        seed: int,
        working_directory: Path,  # noqa: ARG002
    ):
        """Create a Random Search Optimizer instance for a given problem statement."""
        match problem.config_space:
            case ConfigurationSpace():
                self.config_space = copy.deepcopy(problem.config_space)
                self.config_space.seed(seed)
            case list():
                self.config_space = problem.config_space
            case _:
                raise TypeError("Config space must be a ConfigSpace or a list of Configs")

        self.problem = problem
        self._counter = 0
        self.rng = np.random.default_rng(seed)

    def ask(self) -> Query:
        """Ask the optimizer for a new config to evaluate."""
        self._counter += 1
        match self.config_space:
            case ConfigurationSpace():
                config = Config(
                    config_id=str(self._counter),
                    values=dict(self.config_space.sample_configuration()),
                )
            case list():
                # We are dealing with a tabular benchmark: pick a random entry.
                index = int(self.rng.integers(len(self.config_space)))
                config = self.config_space[index]
            case _:
                raise TypeError("Config space must be a ConfigSpace or a list of Configs")

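        # `problem.fidelities` is None for black-box problems, a single
        # `(name, Fidelity)` pair, or a Mapping of several named fidelities.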
        match self.problem.fidelities:
            case None:
                fidelity = None
            case (name, fidelity):
                match fidelity:
                    case RangeFidelity():
                        step = self.rng.integers(0, len(fidelity))
                        value = fidelity.min + step * fidelity.stepsize
                    case ContinuousFidelity():
                        value = self.rng.uniform(fidelity.min, fidelity.max)
                    case ListFidelity():
                        # Sample a value from the list rather than a bare index
                        # (assumes ListFidelity exposes its entries as `.values`).
                        index = int(self.rng.integers(len(fidelity)))
                        value = fidelity.values[index]
                    case _:
                        raise TypeError(
                            "Fidelity must be a RangeFidelity, ContinuousFidelity, or ListFidelity"
                        )

                fidelity = (name, value)
            case Mapping():
                raise NotImplementedError("Random search does not support named fidelities")
            case _:
                raise TypeError(
                    "Expected fidelities to be None, a (name, Fidelity) pair, or a Mapping"
                )

        return Query(config=config, fidelity=fidelity)

    def tell(self, result: Result) -> None:
        """Tell the optimizer the result of the query."""
        # NOTE(eddiebergman): Random search does nothing with the result


def some_func(query: Query) -> Result:
    """Toy objective: y = a + b + fidelity."""
    assert isinstance(query.fidelity, tuple)

    a = query.config["a"]
    b = query.config["b"]
    _, fidelity = query.fidelity
    return query.make_result({"y": a + b + fidelity})


BRANIN_BENCH = FunctionalBenchmark(
    name="branin",
    config_space=ConfigurationSpace({"a": (-10.0, 10.0), "b": (-10.0, 10.0)}),
    fidelities={
        "z": Fidelity.frm(range(1, 100)),
    },
    metrics={
        # y = a + b + z with a, b in [-10, 10] and z in [1, 99],
        # so the achievable range is [-20 + 1, 20 + 99] = [-19, 119].
        "y": Measure.metric((-20 + 1, 20 + 99), minimize=False),
    },
    query=some_func,
)

if __name__ == "__main__":
    import hpoglue

    data = hpoglue.run(MyOpt, BRANIN_BENCH, fidelities="z", budget=100, continuations=False)
    print(data)  # noqa: T201
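
As a quick sanity check, the objective can also be exercised without going through hpoglue.run, reusing only the Query, Config, and some_func pieces defined above. A minimal sketch (it assumes a Query can be constructed standalone, exactly as the optimizer itself does in ask()):

q = Query(
    config=Config(config_id="0", values={"a": 1.0, "b": 2.0}),
    fidelity=("z", 50),
)
print(some_func(q))  # Result carrying y = 1.0 + 2.0 + 50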
