-
I have re-implemented the regression tutorial with an increased number of test points and noticed that my samples from the latent function are not as smooth as expected (see the attached images). Why is this happening? I also tried hardcoding a large lengthscale, but that does not fix the issue either: for this example, increasing the lengthscale just underfits the training data while still producing function samples that are not smooth. Am I missing something?

Code to reproduce the figures:

```python
import torch
import gpytorch as gpt
import matplotlib.pyplot as plt
import numpy as np

# training data
noise_var = 0.04
x_train = torch.linspace(0, 1, 100)
y_train = torch.sin(x_train * (2 * np.pi)) + torch.randn(x_train.size()) * np.sqrt(noise_var)

# model
class ExactGP(gpt.models.ExactGP):
    def __init__(self, x_train, y_train, likelihood):
        super(ExactGP, self).__init__(x_train, y_train, likelihood)
        self.mean_module = gpt.means.ConstantMean()
        self.covar_module = gpt.kernels.ScaleKernel(gpt.kernels.RBFKernel())

    def forward(self, x):
        return gpt.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))

likelihood = gpt.likelihoods.GaussianLikelihood()
model = ExactGP(x_train, y_train, likelihood)

# optimize hyperparams
n_iter = 50
model.train()
likelihood.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
mll = gpt.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(n_iter):
    optimizer.zero_grad()
    y_hat = model(x_train)
    loss = -mll(y_hat, y_train)
    loss.backward()
    optimizer.step()

# make predictions
model.eval()
likelihood.eval()
with torch.no_grad(), gpt.settings.fast_pred_var():
    x_test = torch.linspace(0, 1, 501)
    f_hat = model(x_test)
    y_hat = likelihood(f_hat)

# plot em
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5), sharey=True)
fig_zoom, ax_zoom = plt.subplots()
with torch.no_grad():
    lower, upper = y_hat.confidence_region()
    ax[0].plot(x_train.numpy(), y_train.numpy(), 'k*', label='Observations')
    ax[0].plot(x_test.numpy(), y_hat.mean.numpy(), 'b', label='Mean')
    ax[0].legend()
    ax[0].set_title('Observations and mean')
    for i in range(5):
        f_samp = f_hat.sample()
        ax[1].plot(x_test.numpy(), f_samp.numpy(), 'b', alpha=0.5)
        ax_zoom.plot(x_test.numpy(), f_samp.numpy(), 'b', alpha=0.5)
    ax[1].set_title('F samples')
    ax_zoom.set_title('F samples (zoomed)')
    ax_zoom.set_xlim((0.1, 0.3))
    ax_zoom.set_ylim((0.5, 1.2))
fig.savefig('plots.png', dpi=200)
fig_zoom.savefig('plot_zoom.png', dpi=200)
plt.show()
```

Note: the related post (#2094) is concerned with sampling from the likelihood, whereas my post asks about samples from the model.
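For reference, hard-coding the lengthscale in GPyTorch can be done along the lines below. This is a hedged sketch only; the value shown is illustrative and not necessarily the exact code used for the experiment above:

```python
# Hypothetical sketch: pin the RBF lengthscale to a large value and keep it fixed during training.
model.covar_module.base_kernel.lengthscale = 2.0  # illustrative value, not the one actually tried
model.covar_module.base_kernel.raw_lengthscale.requires_grad_(False)  # exclude from gradient updates
```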
-
This seems like it could be numerical issues. What happens if you run this with the `torch.double` data type instead? And omit the `gpt.settings.fast_pred_var()` context?
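For reference, a minimal sketch of that suggestion applied to the snippet above. It reuses the imports, `noise_var`, the `ExactGP` class, and the training loop from the question; only the changed lines are shown:

```python
# Sketch: same pipeline, but in double precision and without fast_pred_var().
x_train = torch.linspace(0, 1, 100, dtype=torch.double)
y_train = torch.sin(x_train * (2 * np.pi)) + torch.randn(x_train.size(), dtype=torch.double) * np.sqrt(noise_var)

likelihood = gpt.likelihoods.GaussianLikelihood()
model = ExactGP(x_train, y_train, likelihood).double()  # casts all parameters, incl. the likelihood's, to float64

# ... run the same training loop as in the question ...

model.eval()
likelihood.eval()
with torch.no_grad():  # note: no gpt.settings.fast_pred_var() context here
    x_test = torch.linspace(0, 1, 501, dtype=torch.double)
    f_hat = model(x_test)
    y_hat = likelihood(f_hat)
```

If the jaggedness is indeed numerical, samples drawn via `f_hat.sample()` should look noticeably smoother in float64.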