Probability of Failure Estimation with Gaussian Processes
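This notebook demonstrates qp.PFGPCI, a QMCPy stopping criterion that estimates a probability of failure P[g(X) >= tau] (or P[g(X) <= tau]) by fitting a Gaussian process (GP) surrogate to the simulation g, classifying points by which side of the failure threshold tau the posterior mean falls on, and converting the posterior probability of misclassification into a credible-interval error bound. As a rough sketch of the idea (hypothetical code, not the PFGPCI internals), given the GP posterior mean m and standard deviation s at points drawn from the true measure:

import numpy as np
import scipy.stats

def pf_plugin_and_error(m, s, tau=0., alpha=1e-1):
    # plug-in estimate: fraction of points the posterior mean classifies as failures
    p_hat = np.mean(m >= tau)
    # pointwise posterior probability that this classification is wrong
    p_mis = scipy.stats.norm.cdf(-np.abs(m - tau) / s)
    # Markov-style bound: P[|p_hat - p| >= eps] <= E|p_hat - p| / eps <= mean(p_mis) / eps,
    # so eps = mean(p_mis) / alpha gives a bound holding with probability at least 1 - alpha
    eps = np.mean(p_mis) / alpha
    return p_hat, eps

New simulations are then placed where misclassification is most likely, as shown in the method visual and the acceptance-rejection sampling logs below.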
In [1]:
import qmcpy as qp
import numpy as np
import scipy.stats
import gpytorch
import torch
import os
import warnings
import pandas as pd
from gpytorch.utils.warnings import NumericalWarning
warnings.filterwarnings("ignore") # silence all warnings, notably gpytorch's NumericalWarning
pd.set_option(
    'display.max_rows', None,
    'display.max_columns', None,
    'display.width', 1000,
    'display.colheader_justify', 'center',
    'display.precision', 2,
    'display.float_format', lambda x: '%.1e'%x)
from matplotlib import pyplot,gridspec
pyplot.style.use('seaborn-v0_8-whitegrid')
pyplot.rcParams['font.size'] = 25
pyplot.rcParams['legend.fontsize'] = 25
pyplot.rcParams['lines.linewidth'] = 5
pyplot.rcParams['lines.markersize'] = 10
In [2]:
gpytorch_use_gpu = torch.cuda.is_available() # run GPyTorch models on a GPU if one is available
gpytorch_use_gpu
Out[2]:
False
Method Visual
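The cell below fits a GP to four lattice samples of f(x) = x sin(4 pi x) and draws two posterior sample paths. In each lower panel, ticks along the top mark where the posterior mean predicts a failure (f >= 0) and ticks along the bottom where it predicts no failure; magenta means the sample path agrees with that classification, orange means it disagrees. The measure of the orange (misclassified) regions is exactly what the PFGPCI error bound controls.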
In [3]:
cols = 2
nticks = 129
np.random.seed(7); torch.manual_seed(1)
ci_percentage = .95
beta = scipy.stats.norm.ppf(np.mean([ci_percentage,1])) # two-sided 95% CI multiplier, ~1.96
f = lambda x: x*np.sin(4*np.pi*x)
x = qp.Lattice(1,seed=7).gen_samples(4).squeeze()
y = f(x)
gp = qp.util.ExactGPyTorchRegressionModel(
    x_t = x[:,None],
    y_t = y,
    prior_mean = gpytorch.means.ZeroMean(),
    prior_cov = gpytorch.kernels.ScaleKernel(gpytorch.kernels.MaternKernel(nu=2.5)),
    likelihood = gpytorch.likelihoods.GaussianLikelihood(noise_constraint = gpytorch.constraints.Interval(1e-12,1e-8)))
gp.fit(
    optimizer = torch.optim.Adam(gp.parameters(),lr=0.1),
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(gp.likelihood,gp),
    training_iter = 100)
xticks = np.linspace(0,1,nticks)
yticks = f(xticks)
yhatticks_mean,yhatticks_std = gp.predict(xticks[:,None])
f_preds = gp(torch.from_numpy(xticks[:,None]))
f_samples = f_preds.sample(sample_shape=torch.Size((cols,))).numpy()
fig = pyplot.figure(tight_layout=True)
gs = gridspec.GridSpec(2,cols)
ax_top = fig.add_subplot(gs[0,:])
scatter_data = ax_top.scatter(x,y,color='r')
plot_yticks, = ax_top.plot(xticks,yticks,color='c')
plot_post_mean, = ax_top.plot(xticks,yhatticks_mean,color='k')
plot_gp_ci = ax_top.fill_between(xticks,yhatticks_mean+beta*yhatticks_std,yhatticks_mean-beta*yhatticks_std,color='k',alpha=.25)
ax_top.axhline(y=0,color='lightgreen')
for spine in ['top','bottom']: ax_top.spines[spine].set_visible(False)
ax_top.set_xlim([0,1]); ax_top.set_xticks([]); ax_top.set_yticks([])
for c in range(cols):
    f_sample = f_samples[c]
    ax = fig.add_subplot(gs[1,c]) if c==0 else fig.add_subplot(gs[1,c],sharey=ax)
    ax.scatter(x,y,color='r')
    ax.plot(xticks,yhatticks_mean,color='k')
    ax.set_xlim([0,1]); ax.set_xticks([])
    plot_draw, = ax.plot(xticks,f_sample,c='b')
    ymin,ymax = ax.get_ylim()
    yminticks,ymaxticks = np.tile(ymin,nticks),np.tile(ymax,nticks)
    # classify each tick by whether the posterior draw and posterior mean agree in sign
    tp = (f_sample>=0)*(yhatticks_mean>=0)
    tn = (f_sample<0)*(yhatticks_mean<0)
    fp = (f_sample<0)*(yhatticks_mean>=0)
    fn = (f_sample>=0)*(yhatticks_mean<0)
    plot_tp, = ax.plot(xticks,np.ma.masked_where(~tp,ymaxticks),color='m',linewidth=5)
    plot_fp, = ax.plot(xticks,np.ma.masked_where(~fp,ymaxticks),color='orange',linewidth=5)
    plot_tn, = ax.plot(xticks,np.ma.masked_where(~tn,yminticks),color='m',linewidth=5)
    plot_fn, = ax.plot(xticks,np.ma.masked_where(~fn,yminticks),color='orange',linewidth=5)
    ax.set_ylim([ymin-(ymax-ymin)*.05,ymax+(ymax-ymin)*.05])
    for spine in ['top','bottom']: ax.spines[spine].set_visible(False)
    ax.set_yticks([])
    plot_failure_threshold = ax.axhline(y=0,color='lightgreen')
fig.legend([scatter_data,plot_yticks,plot_post_mean,plot_gp_ci],
    ['data','simulation','posterior mean','posterior 95% CI'],
    ncol=4,
    frameon=False,
    loc='upper center',
    bbox_to_anchor=(.5,1.075),
    prop={'size':11})
fig.legend([plot_failure_threshold,plot_draw,plot_tp,plot_fp],
    ['failure threshold','sample path','True','False'],
    ncol=4,
    frameon=False,
    loc='upper center',
    bbox_to_anchor=(.5,0),
    prop={'size':11})
fig.savefig("outputs/TP_FP_TN_FN.png",dpi=256,transparent=True,bbox_inches="tight")
Sin 1d Problem
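The first problem estimates P[sin(x) >= 0] for x uniform on [0, 6 pi] (qp.Sin1d with k=3), whose exact value is 1/2, matching the printed reference approximation.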
In [4]:
mcispfgp = qp.PFGPCI(
    integrand = qp.Sin1d(qp.DigitalNetB2(1,seed=17),k=3),
    failure_threshold = 0,
    failure_above_threshold = True,
    abs_tol = 1e-2,
    alpha = 1e-1,
    n_init = 4,
    init_samples = None,
    batch_sampler = qp.PFSampleErrorDensityAR(verbose=True),
    n_batch = 4,
    n_limit = 20,
    n_approx = 2**18,
    gpytorch_prior_mean = gpytorch.means.ZeroMean(),
    gpytorch_prior_cov = gpytorch.kernels.ScaleKernel(
        gpytorch.kernels.MaternKernel(nu=2.5,
            lengthscale_constraint = gpytorch.constraints.Interval(.01,.1)),
        outputscale_constraint = gpytorch.constraints.Interval(1e-3,10)),
    gpytorch_likelihood = gpytorch.likelihoods.GaussianLikelihood(noise_constraint = gpytorch.constraints.Interval(1e-12,1e-8)),
    gpytorch_marginal_log_likelihood_func = lambda likelihood,gpyt_model: gpytorch.mlls.ExactMarginalLogLikelihood(likelihood,gpyt_model),
    torch_optimizer_func = lambda gpyt_model: torch.optim.Adam(gpyt_model.parameters(),lr=0.1),
    gpytorch_train_iter = 100,
    gpytorch_use_gpu = False,
    verbose = 50,
    n_ref_approx = 2**22,
    seed_ref_approx = None)
solution,data = mcispfgp.integrate(seed=7,refit=False)
print(data)
df = pd.DataFrame(data.get_results_dict())
print("\nIteration Summary")
print(df)
fig = data.plot()
df.to_csv("outputs/Sin1D_df.csv",index=False)
fig.savefig("outputs/Sin1D_fig.png",dpi=256,transparent=True,bbox_inches="tight")
reference approximation with d=1: 0.5000002384185791
batch 0
gpytorch model fitting
iter 50 of 100
likelihood.noise_covar.raw_noise.................. -8.37e-02
covar_module.raw_outputscale...................... -3.03e+00
covar_module.base_kernel.raw_lengthscale.......... 5.02e+00
iter 100 of 100
likelihood.noise_covar.raw_noise.................. -9.22e-02
covar_module.raw_outputscale...................... -2.96e+00
covar_module.base_kernel.raw_lengthscale.......... 6.27e+00
batch 1
AR sampling with efficiency 2.8e-01, expect 14 draws: 12, 15, 18, 21, 24,
batch 2
AR sampling with efficiency 1.7e-01, expect 23 draws: 16, 24, 28,
batch 3
AR sampling with efficiency 9.4e-02, expect 42 draws: 28,
batch 4
AR sampling with efficiency 6.0e-02, expect 66 draws: 48, 72,
PFGPCIData (Data)
solution 0.501
error_bound 0.111
bound_low 0.390
bound_high 0.611
n_total 20
time_integrate 0.363
PFGPCI (AbstractStoppingCriterion)
abs_tol 0.010
n_init 2^(2)
n_limit 20
n_batch 2^(2)
Sin1d (AbstractIntegrand)
Uniform (AbstractTrueMeasure)
lower_bound 0
upper_bound 18.850
DigitalNetB2 (AbstractLDDiscreteDistribution)
d 1
replications 1
randomize LMS DS
gen_mats_source joe_kuo.6.21201.txt
order RADICAL INVERSE
t 63
alpha 1
n_limit 2^(32)
entropy 17
Iteration Summary
iter n_sum n_batch error_bounds ci_low ci_high solutions solutions_ref error_ref in_ci
0 0 4 4 5.4e-01 0.0e+00 1.0e+00 4.6e-01 5.0e-01 3.9e-02 True
1 1 8 4 5.0e-01 0.0e+00 1.0e+00 5.0e-01 5.0e-01 4.2e-03 True
2 2 12 4 4.7e-01 3.5e-02 9.8e-01 5.1e-01 5.0e-01 6.4e-03 True
3 3 16 4 3.0e-01 2.1e-01 8.1e-01 5.1e-01 5.0e-01 9.4e-03 True
4 4 20 4 1.1e-01 3.9e-01 6.1e-01 5.0e-01 5.0e-01 5.2e-04 True
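After the initial design, each batch is drawn by acceptance-rejection (AR) from a density proportional to the pointwise misclassification probability, so new simulations concentrate where the GP is most uncertain about the classification; the logged efficiency is the acceptance rate and the trailing counts track candidate draws. A generic AR loop looks roughly like this (a hypothetical sketch assuming helpers sample_measure(n) for candidate draws and mis_prob(x) bounded above by mis_max; qp.PFSampleErrorDensityAR's actual implementation may differ):

import numpy as np

def ar_sample(n, sample_measure, mis_prob, mis_max, seed=7):
    rng = np.random.default_rng(seed)
    accepted = []
    while len(accepted) < n:
        x = sample_measure(64)             # candidate batch from the true measure
        u = rng.random(len(x))
        keep = u <= mis_prob(x) / mis_max  # accept in proportion to the error density
        accepted.extend(x[keep])
    return np.array(accepted[:n])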
Multimodal 2d Problem
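The same stopping criterion applied to qp.Multimodal2d on the box [-4,7] x [-3,8], whose limit-state function, as the name suggests, has multiple failure modes; the reference failure probability is about 0.302.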
In [5]:
mcispfgp = qp.PFGPCI(
    integrand = qp.Multimodal2d(qp.DigitalNetB2(2,seed=17)),
    failure_threshold = 0,
    failure_above_threshold = True,
    abs_tol = 1e-2,
    alpha = 1e-1,
    n_init = 64,
    init_samples = None,
    batch_sampler = qp.PFSampleErrorDensityAR(verbose=True),
    n_batch = 16,
    n_limit = 128,
    n_approx = 2**18,
    gpytorch_prior_mean = gpytorch.means.ZeroMean(),
    gpytorch_prior_cov = gpytorch.kernels.ScaleKernel(
        gpytorch.kernels.MaternKernel(nu=1.5,
            lengthscale_constraint = gpytorch.constraints.Interval(.1,1)),
        outputscale_constraint = gpytorch.constraints.Interval(1e-3,.5)),
    gpytorch_likelihood = gpytorch.likelihoods.GaussianLikelihood(noise_constraint = gpytorch.constraints.Interval(1e-12,1e-8)),
    gpytorch_marginal_log_likelihood_func = lambda likelihood,gpyt_model: gpytorch.mlls.ExactMarginalLogLikelihood(likelihood,gpyt_model),
    torch_optimizer_func = lambda gpyt_model: torch.optim.Adam(gpyt_model.parameters(),lr=0.1),
    gpytorch_train_iter = 800,
    gpytorch_use_gpu = gpytorch_use_gpu,
    verbose = 200,
    n_ref_approx = 2**22,
    seed_ref_approx = None)
solution,data = mcispfgp.integrate(seed=7,refit=False)
print(data)
df = pd.DataFrame(data.get_results_dict())
print("\nIteration Summary")
print(df)
fig = data.plot()
df.to_csv("outputs/Multimodal2D_df.csv",index=False)
fig.savefig("outputs/Multimodal2D_fig.png",dpi=256,transparent=True,bbox_inches="tight")
reference approximation with d=2: 0.30207324028015137
batch 0
gpytorch model fitting
iter 200 of 800
likelihood.noise_covar.raw_noise.................. 1.45e+00
covar_module.raw_outputscale...................... 2.79e+00
covar_module.base_kernel.raw_lengthscale.......... -3.02e+00
iter 400 of 800
likelihood.noise_covar.raw_noise.................. 1.48e+00
covar_module.raw_outputscale...................... 3.54e+00
covar_module.base_kernel.raw_lengthscale.......... -3.33e+00
iter 600 of 800
likelihood.noise_covar.raw_noise.................. 1.52e+00
covar_module.raw_outputscale...................... 4.07e+00
covar_module.base_kernel.raw_lengthscale.......... -3.46e+00
iter 800 of 800
likelihood.noise_covar.raw_noise.................. 1.56e+00
covar_module.raw_outputscale...................... 4.48e+00
covar_module.base_kernel.raw_lengthscale.......... -3.51e+00
batch 1
AR sampling with efficiency 4.9e-02, expect 323 draws: 224, 364, 392, 406,
batch 2
AR sampling with efficiency 3.3e-02, expect 485 draws: 336, 609, 735, 798, 819, 840, 861, 882,
batch 3
AR sampling with efficiency 2.1e-02, expect 764 draws: 528, 891, 1089,
batch 4
AR sampling with efficiency 2.3e-02, expect 692 draws: 480, 660, 750, 810,
PFGPCIData (Data)
solution 0.300
error_bound 0.090
bound_low 0.210
bound_high 0.390
n_total 2^(7)
time_integrate 1.557
PFGPCI (AbstractStoppingCriterion)
abs_tol 0.010
n_init 2^(6)
n_limit 2^(7)
n_batch 2^(4)
Multimodal2d (AbstractIntegrand)
Uniform (AbstractTrueMeasure)
lower_bound [-4 -3]
upper_bound [7 8]
DigitalNetB2 (AbstractLDDiscreteDistribution)
d 2^(1)
replications 1
randomize LMS DS
gen_mats_source joe_kuo.6.21201.txt
order RADICAL INVERSE
t 63
alpha 1
n_limit 2^(32)
entropy 17
Iteration Summary
iter n_sum n_batch error_bounds ci_low ci_high solutions solutions_ref error_ref in_ci
0 0 64 64 2.5e-01 2.7e-02 5.2e-01 2.7e-01 3.0e-01 2.8e-02 True
1 1 80 16 1.6e-01 1.2e-01 4.5e-01 2.9e-01 3.0e-01 1.5e-02 True
2 2 96 16 1.0e-01 1.8e-01 3.9e-01 2.9e-01 3.0e-01 1.3e-02 True
3 3 112 16 1.2e-01 1.8e-01 4.1e-01 3.0e-01 3.0e-01 6.1e-03 True
4 4 128 16 9.0e-02 2.1e-01 3.9e-01 3.0e-01 3.0e-01 2.4e-03 True
Four Branch 2d Problem
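qp.FourBranch2d is a classic structural-reliability benchmark on [-8,8]^2 with a reference failure probability of about 0.209. Note the run below stops at n_total = 124, short of n_limit = 200, once the error bound drops below abs_tol.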
In [6]:
mcispfgp = qp.PFGPCI(
    integrand = qp.FourBranch2d(qp.DigitalNetB2(2,seed=17)),
    failure_threshold = 0,
    failure_above_threshold = True,
    abs_tol = 1e-2,
    alpha = 1e-1,
    n_init = 64,
    init_samples = None,
    batch_sampler = qp.PFSampleErrorDensityAR(verbose=True),
    n_batch = 12,
    n_limit = 200,
    n_approx = 2**18,
    gpytorch_prior_mean = gpytorch.means.ZeroMean(),
    gpytorch_prior_cov = gpytorch.kernels.ScaleKernel(
        gpytorch.kernels.MaternKernel(nu=1.5,
            lengthscale_constraint = gpytorch.constraints.Interval(.5,1)),
        outputscale_constraint = gpytorch.constraints.Interval(1e-8,.5)),
    gpytorch_likelihood = gpytorch.likelihoods.GaussianLikelihood(noise_constraint = gpytorch.constraints.Interval(1e-12,1e-8)),
    gpytorch_marginal_log_likelihood_func = lambda likelihood,gpyt_model: gpytorch.mlls.ExactMarginalLogLikelihood(likelihood,gpyt_model),
    torch_optimizer_func = lambda gpyt_model: torch.optim.Adam(gpyt_model.parameters(),lr=0.1),
    gpytorch_train_iter = 800,
    gpytorch_use_gpu = gpytorch_use_gpu,
    verbose = 200,
    n_ref_approx = 2**22,
    seed_ref_approx = None)
solution,data = mcispfgp.integrate(seed=7,refit=False)
print(data)
df = pd.DataFrame(data.get_results_dict())
print("\nIteration Summary")
print(df)
fig = data.plot()
df.to_csv("outputs/FourBranch2D_df.csv",index=False)
fig.savefig("outputs/FourBranch2D_fig.png",dpi=256,transparent=True,bbox_inches="tight")
reference approximation with d=2: 0.20871806144714355
batch 0
gpytorch model fitting
iter 200 of 800
likelihood.noise_covar.raw_noise.................. 2.88e+00
covar_module.raw_outputscale...................... 3.85e+00
covar_module.base_kernel.raw_lengthscale.......... -4.50e+00
iter 400 of 800
likelihood.noise_covar.raw_noise.................. 3.63e+00
covar_module.raw_outputscale...................... 4.75e+00
covar_module.base_kernel.raw_lengthscale.......... -5.44e+00
iter 600 of 800
likelihood.noise_covar.raw_noise.................. 4.16e+00
covar_module.raw_outputscale...................... 5.33e+00
covar_module.base_kernel.raw_lengthscale.......... -6.04e+00
iter 800 of 800
likelihood.noise_covar.raw_noise.................. 4.58e+00
covar_module.raw_outputscale...................... 5.77e+00
covar_module.base_kernel.raw_lengthscale.......... -6.48e+00
batch 1
AR sampling with efficiency 8.2e-03, expect 1470 draws: 1020, 1360, 1530, 1615,
batch 2
AR sampling with efficiency 5.1e-03, expect 2344 draws: 1632, 2312, 2856, 3400, 3944, 4080, 4216, 4352, 4488, 4624,
batch 3
AR sampling with efficiency 3.5e-03, expect 3474 draws: 2412, 3216, 3618, 3819, 4020,
batch 4
AR sampling with efficiency 2.5e-03, expect 4789 draws: 3324, 5540, 6925,
batch 5
AR sampling with efficiency 2.1e-03, expect 5641 draws: 3912, 5868, 6194, 6520, 6846,
PFGPCIData (Data)
solution 0.209
error_bound 0.008
bound_low 0.202
bound_high 0.217
n_total 124
time_integrate 1.850
PFGPCI (AbstractStoppingCriterion)
abs_tol 0.010
n_init 2^(6)
n_limit 200
n_batch 12
FourBranch2d (AbstractIntegrand)
Uniform (AbstractTrueMeasure)
lower_bound -8
upper_bound 2^(3)
DigitalNetB2 (AbstractLDDiscreteDistribution)
d 2^(1)
replications 1
randomize LMS DS
gen_mats_source joe_kuo.6.21201.txt
order RADICAL INVERSE
t 63
alpha 1
n_limit 2^(32)
entropy 17
Iteration Summary
iter n_sum n_batch error_bounds ci_low ci_high solutions solutions_ref error_ref in_ci
0 0 64 64 4.1e-02 1.7e-01 2.5e-01 2.1e-01 2.1e-01 3.0e-03 True
1 1 76 12 2.6e-02 1.9e-01 2.4e-01 2.1e-01 2.1e-01 5.4e-03 True
2 2 88 12 1.7e-02 1.9e-01 2.3e-01 2.1e-01 2.1e-01 1.9e-03 True
3 3 100 12 1.3e-02 2.0e-01 2.2e-01 2.1e-01 2.1e-01 1.0e-03 True
4 4 112 12 1.1e-02 2.0e-01 2.2e-01 2.1e-01 2.1e-01 9.1e-05 True
5 5 124 12 7.6e-03 2.0e-01 2.2e-01 2.1e-01 2.1e-01 5.5e-04 True
Ishigami 3d Problem
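For qp.Ishigami on [-pi,pi]^3 we set failure_above_threshold=False, so the target is P[f(X) <= 0], roughly 0.162 by the reference approximation.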
In [16]:
mcispfgp = qp.PFGPCI(
    integrand = qp.Ishigami(qp.DigitalNetB2(3,seed=17)),
    failure_threshold = 0,
    failure_above_threshold = False,
    abs_tol = 1e-2,
    alpha = 1e-1,
    n_init = 128,
    init_samples = None,
    batch_sampler = qp.PFSampleErrorDensityAR(verbose=True),
    n_batch = 16,
    n_limit = 256,
    n_approx = 2**18,
    gpytorch_prior_mean = gpytorch.means.ZeroMean(),
    gpytorch_prior_cov = gpytorch.kernels.ScaleKernel(
        gpytorch.kernels.MaternKernel(nu=2.5,
            lengthscale_constraint = gpytorch.constraints.Interval(.5,1)),
        outputscale_constraint = gpytorch.constraints.Interval(1e-8,.5)),
    gpytorch_likelihood = gpytorch.likelihoods.GaussianLikelihood(noise_constraint = gpytorch.constraints.Interval(1e-12,1e-8)),
    gpytorch_marginal_log_likelihood_func = lambda likelihood,gpyt_model: gpytorch.mlls.ExactMarginalLogLikelihood(likelihood,gpyt_model),
    torch_optimizer_func = lambda gpyt_model: torch.optim.Adam(gpyt_model.parameters(),lr=0.1),
    gpytorch_train_iter = 800,
    gpytorch_use_gpu = gpytorch_use_gpu,
    verbose = 200,
    n_ref_approx = 2**22,
    seed_ref_approx = None)
solution,data = mcispfgp.integrate(seed=7,refit=False)
print(data)
df = pd.DataFrame(data.get_results_dict())
print("\nIteration Summary")
print(df)
pyplot.rcParams['font.size'] = 15
pyplot.rcParams['legend.fontsize'] = 15
fig = data.plot()
fig.tight_layout()
df.to_csv("outputs/Ishigami3D_df.csv",index=False)
fig.savefig("outputs/Ishigami3D_fig.png",dpi=256,transparent=True,bbox_inches="tight")
reference approximation with d=3: 0.1623830795288086
batch 0
gpytorch model fitting
iter 200 of 800
likelihood.noise_covar.raw_noise.................. 2.21e+00
covar_module.raw_outputscale...................... 3.42e+00
covar_module.base_kernel.raw_lengthscale.......... -4.08e+00
iter 400 of 800
likelihood.noise_covar.raw_noise.................. 2.70e+00
covar_module.raw_outputscale...................... 4.25e+00
covar_module.base_kernel.raw_lengthscale.......... -4.98e+00
iter 600 of 800
likelihood.noise_covar.raw_noise.................. 3.12e+00
covar_module.raw_outputscale...................... 4.81e+00
covar_module.base_kernel.raw_lengthscale.......... -5.56e+00
iter 800 of 800
likelihood.noise_covar.raw_noise.................. 3.48e+00
covar_module.raw_outputscale...................... 5.23e+00
covar_module.base_kernel.raw_lengthscale.......... -5.99e+00
batch 1
AR sampling with efficiency 6.2e-03, expect 2589 draws: 1792, 2240, 2576,
batch 2
AR sampling with efficiency 5.1e-03, expect 3135 draws: 2176, 2992, 3672, 4080, 4352, 4624, 4896,
batch 3
AR sampling with efficiency 4.3e-03, expect 3700 draws: 2560, 3040,
batch 4
AR sampling with efficiency 3.8e-03, expect 4245 draws: 2944, 4600, 4968,
batch 5
AR sampling with efficiency 3.3e-03, expect 4897 draws: 3392, 4876, 5936, 6360, 6572,
batch 6
AR sampling with efficiency 2.9e-03, expect 5482 draws: 3808, 4284,
batch 7
AR sampling with efficiency 2.6e-03, expect 6246 draws: 4336, 5691,
batch 8
AR sampling with efficiency 2.3e-03, expect 6824 draws: 4736, 5328,
PFGPCIData (Data)
solution 0.162
error_bound 0.010
bound_low 0.152
bound_high 0.172
n_total 2^(8)
time_integrate 4.859
PFGPCI (AbstractStoppingCriterion)
abs_tol 0.010
n_init 2^(7)
n_limit 2^(8)
n_batch 2^(4)
Ishigami (AbstractIntegrand)
Uniform (AbstractTrueMeasure)
lower_bound -3.142
upper_bound 3.142
DigitalNetB2 (AbstractLDDiscreteDistribution)
d 3
replications 1
randomize LMS DS
gen_mats_source joe_kuo.6.21201.txt
order RADICAL INVERSE
t 63
alpha 1
n_limit 2^(32)
entropy 17
Iteration Summary
iter n_sum n_batch error_bounds ci_low ci_high solutions solutions_ref error_ref in_ci
0 0 128 128 3.1e-02 1.4e-01 2.0e-01 1.7e-01 1.6e-01 8.6e-03 True
1 1 144 16 2.6e-02 1.4e-01 2.0e-01 1.7e-01 1.6e-01 7.7e-03 True
2 2 160 16 2.2e-02 1.5e-01 1.9e-01 1.7e-01 1.6e-01 5.7e-03 True
3 3 176 16 1.9e-02 1.5e-01 1.8e-01 1.7e-01 1.6e-01 3.0e-03 True
4 4 192 16 1.6e-02 1.5e-01 1.8e-01 1.7e-01 1.6e-01 2.7e-03 True
5 5 208 16 1.5e-02 1.5e-01 1.8e-01 1.6e-01 1.6e-01 1.8e-03 True
6 6 224 16 1.3e-02 1.5e-01 1.8e-01 1.6e-01 1.6e-01 1.4e-03 True
7 7 240 16 1.2e-02 1.5e-01 1.7e-01 1.6e-01 1.6e-01 8.7e-04 True
8 8 256 16 1.0e-02 1.5e-01 1.7e-01 1.6e-01 1.6e-01 3.5e-04 True
Hartmann 6d Problem
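qp.Hartmann6d illustrates a small failure probability in six dimensions: P[f(X) <= -2] for X uniform on [0,1]^6 is roughly 0.0074, so abs_tol is tightened to 2.5e-3. The run stops at n_total = 1856, within the n_limit of 2500.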
In [ ]:
mcispfgp = qp.PFGPCI(
    integrand = qp.Hartmann6d(qp.DigitalNetB2(6,seed=17)),
    failure_threshold = -2,
    failure_above_threshold = False,
    abs_tol = 2.5e-3,
    alpha = 1e-1,
    n_init = 512,
    init_samples = None,
    batch_sampler = qp.PFSampleErrorDensityAR(verbose=True),
    n_batch = 64,
    n_limit = 2500,
    n_approx = 2**18,
    gpytorch_prior_mean = gpytorch.means.ZeroMean(),
    gpytorch_prior_cov = gpytorch.kernels.ScaleKernel(gpytorch.kernels.MaternKernel(nu=2.5)),
    gpytorch_likelihood = gpytorch.likelihoods.GaussianLikelihood(noise_constraint = gpytorch.constraints.Interval(1e-12,1e-8)),
    gpytorch_marginal_log_likelihood_func = lambda likelihood,gpyt_model: gpytorch.mlls.ExactMarginalLogLikelihood(likelihood,gpyt_model),
    torch_optimizer_func = lambda gpyt_model: torch.optim.Adam(gpyt_model.parameters(),lr=0.1),
    gpytorch_train_iter = 150,
    gpytorch_use_gpu = gpytorch_use_gpu,
    verbose = 50,
    n_ref_approx = 2**23,
    seed_ref_approx = None)
solution,data = mcispfgp.integrate(seed=7,refit=False)
print(data)
df = pd.DataFrame(data.get_results_dict())
print("\nIteration Summary")
print(df)
pyplot.rcParams['font.size'] = 15
pyplot.rcParams['legend.fontsize'] = 15
fig = data.plot()
fig.tight_layout()
df.to_csv("outputs/Hartmann6D_df.csv",index=False)
fig.savefig("outputs/Hartmann6D_fig.png",dpi=256,transparent=True,bbox_inches="tight")
reference approximation with d=6: 0.007385849952697754
batch 0
gpytorch model fitting
iter 50 of 150
likelihood.noise_covar.raw_noise.................. 1.28e+00
covar_module.raw_outputscale...................... 5.62e-01
covar_module.base_kernel.raw_lengthscale.......... 1.90e-01
iter 100 of 150
likelihood.noise_covar.raw_noise.................. 2.28e+00
covar_module.raw_outputscale...................... 5.14e-01
covar_module.base_kernel.raw_lengthscale.......... 1.83e-01
iter 150 of 150
likelihood.noise_covar.raw_noise.................. 2.84e+00
covar_module.raw_outputscale...................... 5.12e-01
covar_module.base_kernel.raw_lengthscale.......... 1.83e-01
batch 1
AR sampling with efficiency 2.5e-03, expect 25281 draws: 17536, 19180,
batch 2
AR sampling with efficiency 2.5e-03, expect 25500 draws: 17664, 25944, 27600,
batch 3
AR sampling with efficiency 2.0e-03, expect 31766 draws: 22016, 30272, 31992, 33368, 34056, 34744, 35088, 35432, 35776, 36120, 36464, 36808,
batch 4
AR sampling with efficiency 1.7e-03, expect 36697 draws: 25472, 31840, 34626, 35024, 35422,
batch 5
AR sampling with efficiency 1.5e-03, expect 42097 draws: 29184, 42408, 46056,
batch 6
AR sampling with efficiency 1.3e-03, expect 50094 draws: 34752, 45612, 46155, 46698,
batch 7
AR sampling with efficiency 1.1e-03, expect 55935 draws: 38784, 49086, 53328,
batch 8
AR sampling with efficiency 1.0e-03, expect 63192 draws: 43840, 56855,
batch 9
AR sampling with efficiency 9.4e-04, expect 68419 draws: 47424, 57798, 63726, 65949, 68172,
batch 10
AR sampling with efficiency 8.8e-04, expect 72820 draws: 50496, 65487, 67854, 69432, 71010, 71799, 72588,
batch 11
AR sampling with efficiency 8.1e-04, expect 79266 draws: 54976, 65284, 69579, 70438, 71297,
batch 12
AR sampling with efficiency 7.6e-04, expect 84428 draws: 58560, 61305, 63135, 64050, 64965,
batch 13
AR sampling with efficiency 7.0e-04, expect 91332 draws: 63296, 74175, 75164, 76153, 77142, 78131,
batch 14
AR sampling with efficiency 6.7e-04, expect 95110 draws: 65920, 79310, 83430,
batch 15
AR sampling with efficiency 6.4e-04, expect 99354 draws: 68864, 88232, 90384, 92536, 94688,
batch 16
AR sampling with efficiency 6.2e-04, expect 103811 draws: 71936, 104532, 115772, 116896, 118020,
batch 17
AR sampling with efficiency 6.0e-04, expect 106125 draws: 73600, 83950, 86250,
batch 18
AR sampling with efficiency 5.7e-04, expect 111408 draws: 77248, 90525, 95353,
batch 19
AR sampling with efficiency 5.5e-04, expect 116498 draws: 80768, 99698, 112318, 116104, 118628,
batch 20
AR sampling with efficiency 5.3e-04, expect 120838 draws: 83776, 102102, 111265, 112574,
batch 21
AR sampling with efficiency 5.1e-04, expect 124745 draws: 86464, 114835, 131047, 133749, 135100, 136451,
PFGPCIData (Data)
solution 0.007
error_bound 0.002
bound_low 0.005
bound_high 0.010
n_total 1856
time_integrate 114.951
PFGPCI (AbstractStoppingCriterion)
abs_tol 0.003
n_init 2^(9)
n_limit 2500
n_batch 2^(6)
Hartmann6d (AbstractIntegrand)
Uniform (AbstractTrueMeasure)
lower_bound 0
upper_bound 1
DigitalNetB2 (AbstractLDDiscreteDistribution)
d 6
replications 1
randomize LMS DS
gen_mats_source joe_kuo.6.21201.txt
order RADICAL INVERSE
t 63
alpha 1
n_limit 2^(32)
entropy 17
Iteration Summary
iter n_sum n_batch error_bounds ci_low ci_high solutions solutions_ref error_ref in_ci
0 0 512 512 1.3e-02 0.0e+00 1.6e-02 3.3e-03 7.4e-03 4.1e-03 True
1 1 576 64 1.3e-02 0.0e+00 1.9e-02 6.9e-03 7.4e-03 5.1e-04 True
2 2 640 64 1.0e-02 0.0e+00 1.7e-02 7.2e-03 7.4e-03 1.5e-04 True
3 3 704 64 8.7e-03 0.0e+00 1.6e-02 7.4e-03 7.4e-03 8.2e-06 True
4 4 768 64 7.6e-03 0.0e+00 1.5e-02 7.4e-03 7.4e-03 4.4e-06 True
5 5 832 64 6.4e-03 1.0e-03 1.4e-02 7.4e-03 7.4e-03 4.1e-05 True
6 6 896 64 5.7e-03 1.7e-03 1.3e-02 7.5e-03 7.4e-03 8.0e-05 True
7 7 960 64 5.1e-03 2.4e-03 1.3e-02 7.5e-03 7.4e-03 8.0e-05 True
8 8 1024 64 4.7e-03 2.7e-03 1.2e-02 7.4e-03 7.4e-03 2.2e-05 True
9 9 1088 64 4.4e-03 3.0e-03 1.2e-02 7.4e-03 7.4e-03 2.2e-05 True
10 10 1152 64 4.0e-03 3.4e-03 1.1e-02 7.4e-03 7.4e-03 3.2e-06 True
11 11 1216 64 3.8e-03 3.6e-03 1.1e-02 7.4e-03 7.4e-03 1.8e-05 True
12 12 1280 64 3.5e-03 3.9e-03 1.1e-02 7.4e-03 7.4e-03 1.5e-05 True
13 13 1344 64 3.4e-03 4.0e-03 1.1e-02 7.4e-03 7.4e-03 1.8e-05 True
14 14 1408 64 3.2e-03 4.2e-03 1.1e-02 7.4e-03 7.4e-03 1.5e-05 True
15 15 1472 64 3.1e-03 4.3e-03 1.0e-02 7.4e-03 7.4e-03 6.0e-07 True
16 16 1536 64 3.0e-03 4.4e-03 1.0e-02 7.4e-03 7.4e-03 7.0e-06 True
17 17 1600 64 2.9e-03 4.5e-03 1.0e-02 7.4e-03 7.4e-03 1.8e-05 True
18 18 1664 64 2.7e-03 4.6e-03 1.0e-02 7.4e-03 7.4e-03 6.0e-07 True
19 19 1728 64 2.6e-03 4.7e-03 1.0e-02 7.4e-03 7.4e-03 6.0e-07 True
20 20 1792 64 2.6e-03 4.8e-03 9.9e-03 7.4e-03 7.4e-03 2.0e-05 True
21 21 1856 64 2.5e-03 4.9e-03 9.9e-03 7.4e-03 7.4e-03 2.0e-05 True
Tsunami
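This last example targets the UM-Bridge exahype-tsunami benchmark model, served locally over HTTP by the Docker container started below. The two uncertain inputs are the coordinates of the tsunami's origin, and the failure event is the maximum wave height at the first buoy exceeding a threshold.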
In [3]:
import umbridge
In [4]:
!docker run --name tsunami -d -it -p 4242:4242 linusseelinger/model-exahype-tsunami:latest # https://um-bridge-benchmarks.readthedocs.io/en/docs/models/exahype-tsunami.html
211181b555f7cfc0b32abb7e147d8dea2321e9d4c958c6f9746dbfeacd95ec06
In [5]:
ld_sampler = qp.DigitalNetB2(2,seed=7)
origin_distrib = qp.Uniform(ld_sampler,lower_bound=[-239,-339],upper_bound=[739,339])
umbridge_tsu_model = umbridge.HTTPModel('http://localhost:4242','forward')
umbridge_config = {'d': origin_distrib.d, 'level':1}
qmcpy_umbridge_tsu_model = qp.UMBridgeWrapper(origin_distrib,umbridge_tsu_model,umbridge_config,parallel=True)
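qp.UMBridgeWrapper adapts the UM-Bridge HTTP model on localhost:4242 to a QMCPy integrand over the uniform distribution of tsunami origins; parallel=True is intended to let model evaluations be dispatched concurrently, which matters since each PDE solve is expensive (the full run below takes roughly half an hour).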
In [6]:
class TsunamiMaxWaveHeightBouy1(qp.Integrand):
    # https://um-bridge-benchmarks.readthedocs.io/en/docs/models/exahype-tsunami.html#
    def __init__(self, qmcpy_umbridge_tsu_model):
        self.tsu_model = qmcpy_umbridge_tsu_model
        self.true_measure = self.sampler = self.tsu_model.true_measure
        assert self.tsu_model.d == 2
        super(TsunamiMaxWaveHeightBouy1,self).__init__(
            dimension_indv = (),
            dimension_comb = (),
            parallel = False,
            threadpool = False)
    def g(self, t):
        assert t.ndim==2 and t.shape[1]==self.d
        y = self.tsu_model.g(t)
        y_buoy1_height = 1000*y[1] # max wave height at buoy 1 (meters SSHA)
        return y_buoy1_height
tmwhb1 = TsunamiMaxWaveHeightBouy1(qmcpy_umbridge_tsu_model)
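The wrapper class reduces the model output to a scalar quantity of interest, the maximum wave height at buoy 1 (the second model output, scaled by 1000 per the code comment), with dimension_indv = () declaring a scalar-valued integrand; the failure threshold of 3 is then applied by PFGPCI below.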
In [7]:
mcispfgp = qp.PFGPCI(
    integrand = tmwhb1,
    failure_threshold = 3,
    failure_above_threshold = True,
    abs_tol = 1e-3,
    alpha = 1e-1,
    n_init = 64,
    init_samples = None,
    batch_sampler = qp.PFSampleErrorDensityAR(verbose=True),
    n_batch = 8,
    n_limit = 128,
    n_approx = 2**20,
    gpytorch_prior_mean = gpytorch.means.ConstantMean(),
    gpytorch_prior_cov = gpytorch.kernels.ScaleKernel(
        gpytorch.kernels.MaternKernel(nu=2.5,
            lengthscale_constraint = gpytorch.constraints.Interval(.1,1)),
        outputscale_constraint = gpytorch.constraints.Interval(1e-8,1)),
    gpytorch_likelihood = gpytorch.likelihoods.GaussianLikelihood(noise_constraint = gpytorch.constraints.Interval(1e-12,1e-10)),
    gpytorch_marginal_log_likelihood_func = lambda likelihood,gpyt_model: gpytorch.mlls.ExactMarginalLogLikelihood(likelihood,gpyt_model),
    torch_optimizer_func = lambda gpyt_model: torch.optim.Adam(gpyt_model.parameters(),lr=0.1),
    gpytorch_train_iter = 1000,
    gpytorch_use_gpu = gpytorch_use_gpu,
    verbose = 1000,
    n_ref_approx = 0)
solution,data = mcispfgp.integrate(seed=11,refit=False)
print(data)
df = pd.DataFrame(data.get_results_dict())
print("\nIteration Summary")
print(df)
pyplot.rcParams['font.size'] = 25
pyplot.rcParams['legend.fontsize'] = 25
fig = data.plot()
df.to_csv("outputs/Tsunami2D_df.csv",index=False)
fig.savefig("outputs/Tsunami2D_fig.png",dpi=256,transparent=True,bbox_inches="tight")
batch 0
gpytorch model fitting
iter 1000 of 1000
likelihood.noise_covar.raw_noise.................. 1.17e+00
mean_module.raw_constant.......................... -2.25e+00
covar_module.raw_outputscale...................... 1.99e+00
covar_module.base_kernel.raw_lengthscale.......... -3.02e+00
batch 1
AR sampling with efficiency 1.9e-02, expect 426 draws: 296, 444, 555,
batch 2
AR sampling with efficiency 5.5e-03, expect 1450 draws: 1008, 1512, 1638,
batch 3
AR sampling with efficiency 1.1e-02, expect 757 draws: 528, 924, 990,
batch 4
AR sampling with efficiency 3.2e-03, expect 2489 draws: 1728, 3024,
batch 5
AR sampling with efficiency 2.5e-03, expect 3227 draws: 2240, 2800, 3360, 3640,
batch 6
AR sampling with efficiency 1.4e-03, expect 5698 draws: 3952,
batch 7
AR sampling with efficiency 1.7e-03, expect 4841 draws: 3360, 3780,
batch 8
AR sampling with efficiency 1.6e-03, expect 4905 draws: 3400, 4675, 5100,
PFGPCIData (Data)
solution 0.057
error_bound 0.005
bound_low 0.053
bound_high 0.062
n_total 2^(7)
time_integrate 1857.892
PFGPCI (AbstractStoppingCriterion)
abs_tol 0.001
n_init 2^(6)
n_limit 2^(7)
n_batch 2^(3)
TsunamiMaxWaveHeightBouy1 (AbstractIntegrand)
Uniform (AbstractTrueMeasure)
lower_bound [-239 -339]
upper_bound [739 339]
DigitalNetB2 (AbstractLDDiscreteDistribution)
d 2^(1)
replications 1
randomize LMS DS
gen_mats_source joe_kuo.6.21201.txt
order RADICAL INVERSE
t 63
alpha 1
n_limit 2^(32)
entropy 7
Iteration Summary
iter n_sum n_batch error_bounds ci_low ci_high solutions
0 0 64 64 9.4e-02 0.0e+00 1.5e-01 5.3e-02
1 1 72 8 2.8e-02 3.5e-02 9.0e-02 6.2e-02
2 2 80 8 5.3e-02 1.9e-02 1.2e-01 7.1e-02
3 3 88 8 1.6e-02 5.0e-02 8.2e-02 6.6e-02
4 4 96 8 1.2e-02 4.9e-02 7.4e-02 6.2e-02
5 5 104 8 7.0e-03 5.4e-02 6.8e-02 6.1e-02
6 6 112 8 8.3e-03 5.1e-02 6.8e-02 5.9e-02
7 7 120 8 8.2e-03 5.2e-02 6.8e-02 6.0e-02
8 8 128 8 4.5e-03 5.3e-02 6.2e-02 5.7e-02
In [30]:
!docker rm -f tsunami
tsunami