In [1]:
import botorch
import gpytorch
import matplotlib.pyplot as plt
import torch

# BoTorch recommends double precision for GP numerics.
torch.set_default_dtype(torch.float64)


def toy_f(x):
    """
    Toy deterministic function with a clear large maximum in the center. Toward
    the edges, the function has sudden dropoffs from "medium" to "bad" values.
    The function has no sudden jumps to "good" values... but we'll see that the
    GP will act like it does.
    """
    cbad = -1.
    cmed = 0.
    cgood = 1.

    switch_points1 = torch.tensor(
        [0.1314, 0.1377, 0.1420, 0.1455, 0.1492, 0.1526, 0.1542, 0.1580, 0.1651,
         0.1752, 0.1771, 0.1908, 0.1921, 0.2082, 0.2166, 0.2170, 0.2192, 0.2268,
         0.2380, 0.2394]
    )

    switch_points2 = torch.tensor(
        [0.7544, 0.7593, 0.7618, 0.7705, 0.7779, 0.7816, 0.7856, 0.7856, 0.7904,
         0.7955, 0.7963, 0.8057, 0.8066, 0.8143, 0.8150, 0.8205, 0.8325, 0.8561,
         0.8654, 0.8704]
    )

    if x < 1/8:
        return cbad
    elif x < 2/8:
        # Flicker between "bad" and "medium" at each switch point.
        bad = (x < switch_points1).count_nonzero().item() % 2 == 0
        return cbad if bad else cmed
    elif x < 3/8:
        # Ramp linearly from "medium" up to "good".
        return cmed + (x - 2/8) / (1/8) * (cgood - cmed)
    elif x < 5/8:
        return cgood
    elif x < 6/8:
        # Ramp linearly from "good" back down to "medium".
        return cmed + (6/8 - x) / (1/8) * (cgood - cmed)
    elif x < 7/8:
        # Flicker between "bad" and "medium" at each switch point.
        bad = (x < switch_points2).count_nonzero().item() % 2 == 0
        return cbad if bad else cmed
    else:
        return cbad

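# A quick helper sketch (a hypothetical addition, not called by the demo
# below): plot the true toy_f on a dense grid, making the central plateau
# and the flickering dropoff regions visible.
def plot_toy_f(n=2000):
    xs = torch.linspace(0, 1, n)
    ys = torch.tensor([float(toy_f(x)) for x in xs])
    plt.plot(xs.numpy(), ys.numpy(), color="gray", linewidth=1)
    plt.title("True toy function")
    plt.xlabel("x")
    plt.ylabel("f(x)")
    plt.show()
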
def do_demo(xlim=None, show_pred=False, show_err=False):
    # Cherry-picked random vector that exhibits this phenomenon.
    X = torch.tensor(
        [0.0049, 0.0124, 0.0201, 0.0294, 0.0307, 0.0511, 0.0598, 0.0808, 0.1184,
         0.1395, 0.1514, 0.2075, 0.2129, 0.2439, 0.2518, 0.2532, 0.2699, 0.3145,
         0.3171, 0.3226, 0.3265, 0.3380, 0.3401, 0.3433, 0.3618, 0.3716, 0.3752,
         0.3867, 0.3916, 0.4433, 0.4435, 0.4512, 0.4551, 0.5136, 0.5304, 0.5349,
         0.5402, 0.5568, 0.5698, 0.5713, 0.5889, 0.6051, 0.6127, 0.6324, 0.6426,
         0.6444, 0.6539, 0.6647, 0.6664, 0.6789, 0.6814, 0.6827, 0.6839, 0.6913,
         0.6929, 0.7073, 0.7094, 0.7096, 0.7206, 0.7329, 0.7784, 0.7876, 0.7970,
         0.8395, 0.8402, 0.8411, 0.8763, 0.8782, 0.8821, 0.8831, 0.9127, 0.9241,
         0.9321, 0.9436, 0.9456, 0.9481, 0.9556, 0.9636, 0.9679, 0.9712]
    ).unsqueeze(-1)
    Y = torch.tensor([toy_f(x[0]) for x in X]).unsqueeze(-1)
    # Treat each observation as having a small, known, fixed noise variance.
    Y_var = torch.full_like(Y, 0.01)

    # Fit a GP with fixed observation noise by maximizing the marginal
    # log likelihood.
    model = botorch.models.FixedNoiseGP(X, Y, Y_var)
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(model.likelihood, model)
    botorch.fit_gpytorch_mll(mll)
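
    # (Sketch) With FixedNoiseGP's default covariance, a ScaleKernel wrapping
    # a MaternKernel, the fitted lengthscale can be inspected at
    # model.covar_module.base_kernel.lengthscale; a short lengthscale is what
    # lets the posterior swing sharply between nearby observations.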

    plt.plot(X.numpy(), Y.numpy(), "o", color="black", markersize=5, label="Observations")

    # Maximize Expected Improvement over [0, 1]; best_f=1.0 is the true
    # maximum of toy_f, so EI rewards points the GP believes might exceed it.
    acq_function = botorch.acquisition.ExpectedImprovement(model, best_f=1.0)
    bounds = torch.tensor([[0.0], [1.0]])
    candidates, acq_value = botorch.optim.optimize_acqf(
        acq_function=acq_function,
        bounds=bounds,
        q=1,
        num_restarts=10,
        raw_samples=512,
        options={"batch_limit": 5, "maxiter": 200},
    )
    candidates = candidates.detach()
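
    # (Sketch) The acquisition function can also be evaluated on a dense grid
    # to see where EI spikes; ExpectedImprovement expects a q=1 batch shape:
    #     with torch.no_grad():
    #         ei = acq_function(torch.linspace(0, 1, 1000).reshape(-1, 1, 1))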

    plt.plot(
        candidates.numpy(),
        model.posterior(candidates).mean.detach().item(),
        "o",
        color="C1",
        markersize=5,
        label="Predicted maximum (WTF?)",
    )

    if show_pred:
        X_pred = torch.linspace(0, 1, 1000).unsqueeze(-1)

        with torch.no_grad():
            posterior = model.posterior(X_pred)
        plt.plot(X_pred.numpy(), posterior.mean.numpy(), "b", label="Expected value (from GP)")
        if show_err:
            plt.fill_between(
                X_pred.squeeze().numpy(),
                posterior.mean.squeeze().numpy() - posterior.stddev.squeeze().numpy(),
                posterior.mean.squeeze().numpy() + posterior.stddev.squeeze().numpy(),
                alpha=0.5,
                color="blue",
                label="1 std. dev. (from GP)",
            )


    # Place the legend outside the axes, at the upper right.
    plt.legend(bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0.0)

    plt.title("Random tests of a toy function")
    plt.xlabel("x")
    plt.ylabel("f(x)")
    plt.yticks([])

    if xlim is not None:
        plt.xlim(xlim)

    # plt.savefig("demo2.svg", bbox_inches="tight")
    plt.show()
In [2]:
do_demo()
/Users/mrcslws/dev/src/botorch/botorch/models/utils/assorted.py:201: InputDataWarning: Input data is not standardized. Please consider scaling the input to zero mean and unit variance.
  warnings.warn(msg, InputDataWarning)
In [3]:
do_demo(xlim=(0.11, 0.25))
In [4]:
do_demo(show_pred=True, show_err=True)