Spherical linear kernels for high-dimensional BO

The spherical linear kernel is useful for optimizing high-dimensional problems.

from bofire.benchmarks.svm import SVM
from bofire.data_models.strategies.api import SoboStrategy
from bofire.data_models.kernels.api import SphericalLinearKernel
from bofire.data_models.surrogates.api import SingleTaskGPSurrogate, BotorchSurrogates
import bofire.strategies.api as strategies

We use the SVM benchmark.

# Problem setup: SVM benchmark optimized with a spherical linear kernel.
benchmark = SVM()

# Draw dim+1 initial points from the input space (seeded for reproducibility).
candidates = benchmark._domain.inputs.sample(benchmark.dim + 1, seed=benchmark.seed)

# Evaluate the benchmark on the initial design and attach the output columns
# 'y' and 'valid_y' so the strategy can be told complete experiments.
experiments = candidates.copy()
result = benchmark._f(experiments)
for col in ("y", "valid_y"):
    experiments[col] = result[col]

# SOBO strategy backed by a single-task GP using the SphericalLinearKernel.
surrogate = SingleTaskGPSurrogate(
    inputs=benchmark._domain.inputs,
    outputs=benchmark._domain.outputs,
    kernel=SphericalLinearKernel(),
)
sobo_strategy_data_model = SoboStrategy(
    domain=benchmark._domain,
    seed=benchmark.seed,
    surrogate_specs=BotorchSurrogates(surrogates=[surrogate]),
)
# Map the data model to an executable strategy object.
strategy = strategies.map(sobo_strategy_data_model)
Downloading SVM data...
Download complete.

Running the optimization loop

# Seed the strategy with the initial experiments (replace any prior data).
strategy.tell(experiments, replace=True)

num_steps = 3  # number of BO iterations (the original paper uses 1000 steps)
for step_number in range(num_steps):
    print(f"Step {step_number+1}/{num_steps}")
    # Propose a single candidate, evaluate it on the benchmark, and
    # append the outputs to build a complete experiment row.
    new_candidates = strategy.ask(candidate_count=1)
    new_experiments = new_candidates.copy()
    result = benchmark._f(new_candidates)
    for col in ("y", "valid_y"):
        new_experiments[col] = result[col]
    print(f"New experiment:\n{new_experiments}")
    # Feed the new observation back into the strategy.
    strategy.tell(experiments=new_experiments)

# All experiments observed by the strategy, for post-hoc analysis.
all_experiments = strategy.experiments
/opt/hostedtoolcache/Python/3.12.12/x64/lib/python3.12/site-packages/bofire/surrogates/botorch.py:181: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at /pytorch/torch/csrc/utils/tensor_numpy.cpp:213.)
  torch.from_numpy(Y.values).to(**tkwargs),
Step 1/3
/opt/hostedtoolcache/Python/3.12.12/x64/lib/python3.12/site-packages/linear_operator/utils/cholesky.py:41: NumericalWarning: A not p.d., added jitter of 1.0e-08 to the diagonal
  warnings.warn(
New experiment:
        x_1      x_10     x_100     x_101     x_102     x_103    x_104  \
0  0.826582  0.271678  0.127493  0.837353  0.738748  0.953452  0.18695   

      x_105     x_106     x_107  ...     x_95      x_96      x_97      x_98  \
0  0.152946  0.464439  0.311761  ...  0.53644  0.739997  0.833448  0.099662   

      x_99    y_pred     y_sd     y_des         y  valid_y  
0  0.71552  0.236782  0.00201 -0.236782  0.231462        1  

[1 rows x 393 columns]
Step 2/3
/opt/hostedtoolcache/Python/3.12.12/x64/lib/python3.12/site-packages/linear_operator/utils/cholesky.py:41: NumericalWarning: A not p.d., added jitter of 1.0e-08 to the diagonal
  warnings.warn(
New experiment:
        x_1      x_10     x_100  x_101     x_102     x_103     x_104  x_105  \
0  0.762476  0.974247  0.545345    1.0  0.285827  0.083441  0.486341    1.0   

      x_106     x_107  ...      x_95      x_96      x_97      x_98     x_99  \
0  0.750325  0.254731  ...  0.580569  0.925144  0.352817  0.614842  0.66392   

     y_pred      y_sd     y_des         y  valid_y  
0  0.205848  0.001989 -0.205848  0.234137        1  

[1 rows x 393 columns]
Step 3/3
/opt/hostedtoolcache/Python/3.12.12/x64/lib/python3.12/site-packages/linear_operator/utils/cholesky.py:41: NumericalWarning: A not p.d., added jitter of 1.0e-08 to the diagonal
  warnings.warn(
New experiment:
   x_1  x_10     x_100  x_101  x_102  x_103  x_104     x_105     x_106  \
0  1.0   1.0  0.126734    1.0    0.0    1.0    0.0  0.529237  0.893705   

      x_107  ...      x_95  x_96      x_97      x_98  x_99    y_pred  \
0  0.847225  ...  0.968973   1.0  0.373301  0.024165   1.0  0.223986   

       y_sd     y_des         y  valid_y  
0  0.001895 -0.223986  0.235613        1  

[1 rows x 393 columns]

The results stored in all_experiments can be used to track how the best observed value evolves over the iterations.