Fix hard-coded double precision in test_functions to default dtype #2597

Closed · wants to merge 13 commits
11 changes: 8 additions & 3 deletions botorch/test_functions/base.py
@@ -29,6 +29,7 @@ def __init__(
self,
noise_std: None | float | list[float] = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""Base constructor for test functions.

@@ -37,17 +38,20 @@ def __init__(
provided, specifies separate noise standard deviations for each
objective in a multiobjective problem.
negate: If True, negate the function.
dtype: The dtype that is used for the bounds of the function.
"""
super().__init__()
self.noise_std = noise_std
self.negate = negate
self.dtype = dtype
if len(self._bounds) != self.dim:
raise InputDataError(
"Expected the bounds to match the dimensionality of the domain. "
f"Got {self.dim=} and {len(self._bounds)=}."
)
self.register_buffer(
"bounds", torch.tensor(self._bounds, dtype=torch.double).transpose(-1, -2)
"bounds",
torch.tensor(self._bounds, dtype=self.dtype).transpose(-1, -2),
)

def forward(self, X: Tensor, noise: bool = True) -> Tensor:
@@ -166,6 +170,7 @@ def __init__(
self,
noise_std: None | float | list[float] = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""Base constructor for multi-objective test functions.

@@ -180,8 +185,8 @@
f"If specified as a list, length of noise_std ({len(noise_std)}) "
f"must match the number of objectives ({len(self._ref_point)})"
)
super().__init__(noise_std=noise_std, negate=negate)
ref_point = torch.tensor(self._ref_point, dtype=torch.get_default_dtype())
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
ref_point = torch.tensor(self._ref_point, dtype=self.dtype)
if negate:
ref_point *= -1
self.register_buffer("ref_point", ref_point)
17 changes: 13 additions & 4 deletions botorch/test_functions/multi_fidelity.py
@@ -74,13 +74,18 @@ class AugmentedHartmann(SyntheticTestFunction):
_optimizers = [(0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573, 1.0)]
_check_grad_at_opt = False

def __init__(self, noise_std: float | None = None, negate: bool = False) -> None:
def __init__(
self,
noise_std: float | None = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
"""
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
self.register_buffer("ALPHA", torch.tensor([1.0, 1.2, 3.0, 3.2]))
A = [
[10, 3, 17, 3.5, 1.7, 8],
@@ -126,7 +131,11 @@ class AugmentedRosenbrock(SyntheticTestFunction):
_optimal_value = 0.0

def __init__(
self, dim=3, noise_std: float | None = None, negate: bool = False
self,
dim=3,
noise_std: float | None = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
@@ -141,7 +150,7 @@ def __init__(
self.dim = dim
self._bounds = [(-5.0, 10.0) for _ in range(self.dim)]
self._optimizers = [tuple(1.0 for _ in range(self.dim))]
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

def evaluate_true(self, X: Tensor) -> Tensor:
X_curr = X[..., :-3]
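
Usage sketch (assuming this PR, not taken from it): the multi-fidelity problems can then be constructed in single precision. The choice of dim=4 and the evaluation points are arbitrary and only for illustration.

```python
import torch

from botorch.test_functions.multi_fidelity import AugmentedRosenbrock

problem = AugmentedRosenbrock(dim=4, noise_std=0.1, dtype=torch.float32)
assert problem.bounds.dtype == torch.float32

# Evaluate a small batch of points from the 4-dimensional augmented domain.
X = torch.rand(5, 4, dtype=torch.float32)
Y = problem(X)  # noisy evaluations, since noise_std was provided
```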
28 changes: 21 additions & 7 deletions botorch/test_functions/multi_objective.py
@@ -119,13 +119,15 @@ def __init__(
self,
noise_std: None | float | list[float] = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the objectives.
dtype: The dtype that is used for the bounds of the function.
"""
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
self._branin = Branin()

def _rescaled_branin(self, X: Tensor) -> Tensor:
@@ -179,12 +181,14 @@ def __init__(
dim: int,
noise_std: None | float | list[float] = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
dim: The (input) dimension.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
dtype: The dtype that is used for the bounds of the function.
"""
if dim < self._min_dim:
raise ValueError(f"dim must be >= {self._min_dim}, but got dim={dim}!")
@@ -194,7 +198,7 @@ def __init__(
]
# max_hv is the area of the box minus the area of the curve formed by the PF.
self._max_hv = self._ref_point[0] * self._ref_point[1] - self._area_under_curve
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

@abstractmethod
def _h(self, X: Tensor) -> Tensor:
@@ -339,13 +343,15 @@ def __init__(
num_objectives: int = 2,
noise_std: None | float | list[float] = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
dim: The (input) dimension of the function.
num_objectives: Must be less than dim.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
dtype: The dtype that is used for the bounds of the function.
"""
if dim <= num_objectives:
raise ValueError(
@@ -356,7 +362,7 @@
self.k = self.dim - self.num_objectives + 1
self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
self._ref_point = [self._ref_val for _ in range(num_objectives)]
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)


class DTLZ1(DTLZ):
@@ -608,12 +614,14 @@ def __init__(
noise_std: None | float | list[float] = None,
negate: bool = False,
num_objectives: int = 2,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the objectives.
num_objectives: The number of objectives.
dtype: The dtype that is used for the bounds of the function.
"""
if num_objectives not in (2, 3, 4):
raise UnsupportedError("GMM only currently supports 2 to 4 objectives.")
@@ -623,7 +631,7 @@ def __init__(
if num_objectives > 3:
self._ref_point.append(-0.1866)
self.num_objectives = num_objectives
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
gmm_pos = torch.tensor(
[
[[0.2, 0.2], [0.8, 0.2], [0.5, 0.7]],
@@ -935,13 +943,15 @@ def __init__(
num_objectives: int = 2,
noise_std: None | float | list[float] = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
dim: The (input) dimension of the function.
num_objectives: Number of objectives. Must not be larger than dim.
noise_std: Standard deviation of the observation noise.
negate: If True, negate the function.
dtype: The dtype that is used for the bounds of the function.
"""
if num_objectives != 2:
raise NotImplementedError(
@@ -954,7 +964,7 @@ def __init__(
self.num_objectives = num_objectives
self.dim = dim
self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

@staticmethod
def _g(X: Tensor) -> Tensor:
@@ -1246,15 +1256,17 @@ def __init__(
noise_std: None | float | list[float] = None,
constraint_noise_std: None | float | list[float] = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
noise_std: Standard deviation of the observation noise of the objectives.
constraint_noise_std: Standard deviation of the observation noise of the
constraint.
negate: If True, negate the function.
dtype: The dtype that is used for the bounds of the function.
"""
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
con_bounds = torch.tensor(self._con_bounds, dtype=self.bounds.dtype).transpose(
-1, -2
)
@@ -1357,6 +1369,7 @@ def __init__(
noise_std: None | float | list[float] = None,
constraint_noise_std: None | float | list[float] = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
@@ -1365,12 +1378,13 @@
constraint_noise_std: Standard deviation of the observation noise of the
constraints.
negate: If True, negate the function.
dtype: The dtype that is used for the bounds of the function.
"""
if dim < 2:
raise ValueError("dim must be greater than or equal to 2.")
self.dim = dim
self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)
self.constraint_noise_std = constraint_noise_std

def LA2(self, A, B, C, D, theta) -> Tensor:
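
Usage sketch (assuming the changes above): both the `bounds` and `ref_point` buffers of the multi-objective problems now follow the requested dtype rather than torch.get_default_dtype(). The problem choices and shapes below are illustrative only.

```python
import torch

from botorch.test_functions.multi_objective import DTLZ2, BraninCurrin

bc = BraninCurrin(dtype=torch.float64)
assert bc.bounds.dtype == torch.float64
assert bc.ref_point.dtype == torch.float64

problem = DTLZ2(dim=6, num_objectives=2, dtype=torch.float32)
X = torch.rand(8, 6, dtype=torch.float32)  # DTLZ problems are defined on [0, 1]^d
Y = problem(X)  # 8 x 2 tensor of objective values
```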
25 changes: 19 additions & 6 deletions botorch/test_functions/sensitivity_analysis.py
@@ -24,13 +24,18 @@ class Ishigami(SyntheticTestFunction):
"""

def __init__(
self, b: float = 0.1, noise_std: float | None = None, negate: bool = False
self,
b: float = 0.1,
noise_std: float | None = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
b: the b constant, should be 0.1 or 0.05.
noise_std: Standard deviation of the observation noise.
negative: If True, negative the objective.
dtype: The dtype that is used for the bounds of the function.
"""
self._optimizers = None
if b not in (0.1, 0.05):
@@ -52,7 +57,7 @@ def __init__(
self.dgsm_gradient_square = [2.8, 24.5, 11]
self._bounds = [(-math.pi, math.pi) for _ in range(self.dim)]
self.b = b
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

@property
def _optimal_value(self) -> float:
@@ -127,13 +132,15 @@ def __init__(
a: list = None,
noise_std: float | None = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
dim: Dimensionality of the problem. If 6, 8, or 15, will use standard a.
a: a parameter, unless dim is 6, 8, or 15.
noise_std: Standard deviation of observation noise.
negate: Return negatie of function.
negate: Return negative of function.
dtype: The dtype that is used for the bounds of the function.
"""
self._optimizers = None
self.dim = dim
@@ -163,7 +170,7 @@ def __init__(
else:
self.a = a
self.optimal_sobol_indicies()
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

@property
def _optimal_value(self) -> float:
@@ -207,11 +214,17 @@ class Morris(SyntheticTestFunction):
Proposed to test sensitivity analysis methods
"""

def __init__(self, noise_std: float | None = None, negate: bool = False) -> None:
def __init__(
self,
noise_std: float | None = None,
negate: bool = False,
dtype: torch.dtype = torch.double,
) -> None:
r"""
Args:
noise_std: Standard deviation of observation noise.
negate: Return negative of function.
dtype: The dtype that is used for the bounds of the function.
"""
self._optimizers = None
self.dim = 20
@@ -238,7 +251,7 @@ def __init__(self, noise_std: float | None = None, negate: bool = False) -> None
0,
0,
]
super().__init__(noise_std=noise_std, negate=negate)
super().__init__(noise_std=noise_std, negate=negate, dtype=dtype)

@property
def _optimal_value(self) -> float:
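
Usage sketch for the sensitivity-analysis problems (hypothetical, assuming the new dtype keyword lands as written):

```python
import torch

from botorch.test_functions.sensitivity_analysis import Gsobol, Ishigami, Morris

ishigami = Ishigami(b=0.1, dtype=torch.float32)
morris = Morris(dtype=torch.float64)
gsobol = Gsobol(dim=6, dtype=torch.float32)  # dim in {6, 8, 15} uses the standard `a`

assert ishigami.bounds.dtype == torch.float32
assert morris.bounds.dtype == torch.float64

X = torch.rand(4, ishigami.dim, dtype=torch.float32)
Y = ishigami(X)  # noise-free evaluations, since noise_std was not set
```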