
Utilities

plot_proj

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `sampler` | `Union[DiscreteDistribution, TrueMeasure]` | The generator of samples to be plotted. | required |
| `n` | `Union[int, list]` | The number of samples, or an increasing list of sample sizes; each successive range is plotted in a different color. | `64` |
| `d_horizontal` | `Union[int, list]` | The dimension or list of dimensions to be plotted on the horizontal axes. | `1` |
| `d_vertical` | `Union[int, list]` | The dimension or list of dimensions to be plotted on the vertical axes. | `2` |
| `math_ind` | `bool` | If `True`, dimensions are interpreted as 1-based (math) indices; if `False`, as 0-based (Python) indices. | `True` |
| `marker_size` | `float` | The marker size (typographic points are 1/72 in.). | `5` |
| `figfac` | `float` | The figure size factor. | `5` |
| `fig_title` | `str` | The title of the figure. | `'Projection of Samples'` |
| `axis_pad` | `float` | Padding added to the axes so points on the boundaries remain visible. | `0` |
| `want_grid` | `bool` | If `True`, show a grid on the plot. | `True` |
| `font_family` | `str` | The font family of the plot. | `'sans-serif'` |
| `where_title` | `float` | The vertical position of the title on the figure. | `1` |
| `**kwargs` | `dict` | Additional keyword arguments passed to `matplotlib.pyplot.scatter`. | `{}` |

Source code in qmcpy/util/plot_functions.py
def plot_proj(
        sampler, 
        n = 64,
        d_horizontal = 1, 
        d_vertical = 2,
        math_ind = True, 
        marker_size = 5, 
        figfac = 5, 
        fig_title = 'Projection of Samples', 
        axis_pad = 0, 
        want_grid = True, 
        font_family = "sans-serif", 
        where_title = 1, 
        **kwargs):
    """
    Args:
        sampler (DiscreteDistribution,TrueMeasure): The generator of samples to be plotted.
        n (Union[int,list]): The number of samples, or an increasing list of sample sizes; each successive range is plotted in a different color.
        d_horizontal (Union[int,list]): The dimension or list of dimensions to be plotted on the horizontal axes. 
        d_vertical (Union[int,list]): The dimension or list of dimensions to be plotted on the vertical axes. 
        math_ind (bool): If `True`, dimensions are interpreted as 1-based (math) indices; if `False`, as 0-based (Python) indices.
        marker_size (float): The marker size (typographic points are 1/72 in.).
        figfac (float): The figure size factor.
        fig_title (str): The title of the figure.
        axis_pad (float): Padding added to the axes so points on the boundaries remain visible.
        want_grid (bool): If `True`, show a grid on the plot.
        font_family (str): The font family of the plot.
        where_title (float): The vertical position of the title on the figure.
        **kwargs (dict): Additional keyword arguments passed to `matplotlib.pyplot.scatter`.
    """
    try:
        import matplotlib.pyplot as plt
        dir_path = os.path.dirname(os.path.realpath(__file__))
        plt.style.use(os.path.join(dir_path, "qmcpy.mplstyle"))
    except ImportError:
        raise ImportError("matplotlib must be installed to run the plot_proj function")
    plt.rcParams['font.family'] = font_family 
    n = np.atleast_1d(n)
    d_horizontal = np.atleast_1d(d_horizontal)
    d_vertical = np.atleast_1d(d_vertical)
    samples = sampler(n[n.size - 1])    
    d = samples.shape[1]
    fig, ax = plt.subplots(nrows=d_horizontal.size, ncols=d_vertical.size, figsize=(figfac*d_horizontal.size, figfac*d_vertical.size),squeeze=False)                    

    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    for i in range(d_horizontal.size):         
        for j in range(d_vertical.size):
                n_min = 0
                for m in range(n.size):
                    n_max = n[m]  
                    if(d_horizontal[i] == d_vertical[j]):
                        ax[i,j].remove()  
                        break
                    if(math_ind is True):
                        x = d_horizontal[i] - 1
                        y = d_vertical[j] - 1
                        x_label_num = d_horizontal[i]
                        y_label_num = d_vertical[j]
                    else:
                        x = d_horizontal[i]
                        y = d_vertical[j]
                        x_label_num = d_horizontal[i] + 1
                        y_label_num = d_vertical[j] + 1

                    if type(sampler).__name__=="AbstractDiscreteDistribution":
                        ax[i,j].set_xlim([0-axis_pad,1+axis_pad])
                        ax[i,j].set_ylim([0-axis_pad,1+axis_pad])
                        ax[i,j].set_xticks([0,1/4,1/2,3/4,1])
                        ax[i,j].set_yticks([0,1/4,1/2,3/4,1])
                        ax[i,j].set_aspect(1)
                        if not want_grid:
                            ax[i,j].grid(False) 
                            ax[i,j].tick_params(axis='both', which='both', direction='in', length=5)
                        ax[i,j].grid(want_grid)
                        x_label = r'$x_{i%d}$'%(x_label_num)
                        y_label = r'$x_{i%d}$'%(y_label_num)
                    else:
                        x_label = r'$t_{i%d}$'%(x_label_num)
                        y_label = r'$t_{i%d}$'%(y_label_num)    

                    ax[i,j].set_xlabel(x_label,fontsize = 15)
                    if(d > 1):
                        ax[i,j].set_ylabel(y_label,fontsize = 15)
                        y_axis = samples[n_min:n_max,y]
                    else:
                        y_axis = 0.5*np.ones(n_max-n_min) # plot 1D samples along the horizontal line y = 1/2
                    ax[i,j].scatter(samples[n_min:n_max,x],y_axis,s=marker_size,color=colors[m],label='n_min = %d, n_max = %d'%(n_min,n_max),**kwargs)
                    n_min = n[m]
    plt.suptitle(fig_title,fontsize = 20, y = where_title)
    fig.tight_layout()
    return fig, ax
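
A minimal usage sketch (assuming `plot_proj` is exposed at the `qmcpy` package level; the sampler, sample sizes, and dimensions below are illustrative):

```python
import qmcpy as qp

# pairwise projections of a 3-dimensional lattice rule;
# with math_ind=True the dimensions are 1-based (math) indices,
# and n=[16, 64] colors points 1-16 differently from points 17-64
fig, ax = qp.plot_proj(
    sampler=qp.Lattice(dimension=3, seed=7),
    n=[16, 64],
    d_horizontal=[1, 2],
    d_vertical=[2, 3],
    fig_title="Lattice Projections")
fig.savefig("lattice_projections.png")
```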

mlmc_test

Multilevel Monte Carlo test routine.

Examples:

>>> fo = qp.FinancialOption(
...     sampler=qp.IIDStdUniform(seed=7),
...     option = "ASIAN",
...     asian_mean = "GEOMETRIC",
...     volatility = 0.2, 
...     start_price = 100, 
...     strike_price = 100, 
...     interest_rate = 0.05, 
...     t_final = 1)
>>> print('Exact Value: %s'%fo.get_exact_value_inf_dim())
Exact Value: 5.546818633789201
>>> mlmc_test(fo)
Convergence tests, kurtosis, telescoping sum check using N =  20000 samples
    l              ave(Pf-Pc)     ave(Pf)        var(Pf-Pc)     var(Pf)        kurtosis       check          cost
    0              5.4486e+00     5.4486e+00     5.673e+01      5.673e+01      0.00e+00       0.00e+00       2.00e+00
    1              1.4925e-01     5.5937e+00     3.839e+00      5.838e+01      5.48e+00       1.14e-02       4.00e+00
    2              3.5921e-02     5.6024e+00     9.585e-01      5.990e+01      5.59e+00       7.86e-02       8.00e+00
    3              8.7217e-03     5.5128e+00     2.332e-01      5.828e+01      5.36e+00       2.92e-01       1.60e+01
    4              1.9773e-03     5.6850e+00     6.021e-02      6.081e+01      5.46e+00       5.12e-01       3.20e+01
    5              9.5925e-04     5.5628e+00     1.512e-02      5.939e+01      5.37e+00       3.71e-01       6.40e+01
    6              8.5998e-04     5.5706e+00     3.773e-03      5.995e+01      5.48e+00       2.10e-02       1.28e+02
    7              1.3592e-04     5.4359e+00     9.285e-04      5.808e+01      5.51e+00       4.13e-01       2.56e+02
    8              3.4520e-05     5.5322e+00     2.313e-04      5.881e+01      5.57e+00       2.96e-01       5.12e+02
Linear regression estimates of MLMC parameters
    alpha = 1.617207  (exponent for MLMC weak convergence)
    beta  = 2.000355  (exponent for MLMC variance)
    gamma = 1.000000  (exponent for MLMC cost)
MLMC complexity tests
    rmse_tol       value          mlmc_cost      std_cost       savings        N_l
    5.000e-03      5.545e+00      3.339e+07      1.038e+08      3.11           8605392      1566846      559701       198886       70359        
    1.000e-02      5.539e+00      7.272e+06      1.243e+07      1.71           2009192      365451       130781       46623        
    2.000e-02      5.549e+00      1.827e+06      3.108e+06      1.70           503397       91821        33196        11736        
    5.000e-02      5.474e+00      2.324e+05      2.556e+05      1.10           71432        13143        4617         
    1.000e-01      5.466e+00      6.220e+04      6.389e+04      1.03           19477        3361         1225         

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `integrand` | `AbstractIntegrand` | Multilevel integrand. | required |
| `n` | `int` | Number of samples for convergence tests. | `20000` |
| `l` | `int` | Number of levels for convergence tests. | `8` |
| `n_init` | `int` | Initial number of samples for MLMC calculations. | `200` |
| `rmse_tols` | `ndarray` | Array of desired accuracies for MLMC calculations. | `array([0.005, 0.01, 0.02, 0.05, 0.1])` |
| `levels_min` | `int` | Minimum number of levels for MLMC calculations. | `2` |
| `levels_max` | `int` | Maximum number of levels for MLMC calculations. | `10` |

Source code in qmcpy/util/mlmc_test.py
def mlmc_test(
    integrand,
    n = 20000,
    l = 8,
    n_init = 200,
    rmse_tols = np.array([.005, 0.01, 0.02, 0.05, 0.1]),
    levels_min = 2,
    levels_max = 10,
    ):
    r"""
    Multilevel Monte Carlo test routine.

    Examples:
        >>> fo = qp.FinancialOption(
        ...     sampler=qp.IIDStdUniform(seed=7),
        ...     option = "ASIAN",
        ...     asian_mean = "GEOMETRIC",
        ...     volatility = 0.2, 
        ...     start_price = 100, 
        ...     strike_price = 100, 
        ...     interest_rate = 0.05, 
        ...     t_final = 1)
        >>> print('Exact Value: %s'%fo.get_exact_value_inf_dim())
        Exact Value: 5.546818633789201
        >>> mlmc_test(fo)
        Convergence tests, kurtosis, telescoping sum check using N =  20000 samples
            l              ave(Pf-Pc)     ave(Pf)        var(Pf-Pc)     var(Pf)        kurtosis       check          cost
            0              5.4486e+00     5.4486e+00     5.673e+01      5.673e+01      0.00e+00       0.00e+00       2.00e+00
            1              1.4925e-01     5.5937e+00     3.839e+00      5.838e+01      5.48e+00       1.14e-02       4.00e+00
            2              3.5921e-02     5.6024e+00     9.585e-01      5.990e+01      5.59e+00       7.86e-02       8.00e+00
            3              8.7217e-03     5.5128e+00     2.332e-01      5.828e+01      5.36e+00       2.92e-01       1.60e+01
            4              1.9773e-03     5.6850e+00     6.021e-02      6.081e+01      5.46e+00       5.12e-01       3.20e+01
            5              9.5925e-04     5.5628e+00     1.512e-02      5.939e+01      5.37e+00       3.71e-01       6.40e+01
            6              8.5998e-04     5.5706e+00     3.773e-03      5.995e+01      5.48e+00       2.10e-02       1.28e+02
            7              1.3592e-04     5.4359e+00     9.285e-04      5.808e+01      5.51e+00       4.13e-01       2.56e+02
            8              3.4520e-05     5.5322e+00     2.313e-04      5.881e+01      5.57e+00       2.96e-01       5.12e+02
        Linear regression estimates of MLMC parameters
            alpha = 1.617207  (exponent for MLMC weak convergence)
            beta  = 2.000355  (exponent for MLMC variance)
            gamma = 1.000000  (exponent for MLMC cost)
        MLMC complexity tests
            rmse_tol       value          mlmc_cost      std_cost       savings        N_l
            5.000e-03      5.545e+00      3.339e+07      1.038e+08      3.11           8605392      1566846      559701       198886       70359        
            1.000e-02      5.539e+00      7.272e+06      1.243e+07      1.71           2009192      365451       130781       46623        
            2.000e-02      5.549e+00      1.827e+06      3.108e+06      1.70           503397       91821        33196        11736        
            5.000e-02      5.474e+00      2.324e+05      2.556e+05      1.10           71432        13143        4617         
            1.000e-01      5.466e+00      6.220e+04      6.389e+04      1.03           19477        3361         1225         

    Args:
        integrand (AbstractIntegrand): multilevel integrand
        n (int): number of samples for convergence tests
        l (int): number of levels for convergence tests
        n_init (int): initial number of samples for MLMC calcs
        rmse_tols (np.ndarray): desired accuracy array for MLMC calcs
        levels_min (int): minimum number of levels for MLMC calcs
        levels_max (int): maximum number of levels for MLMC calcs
    """
    # first, convergence tests
    n = 100*np.ceil(n/100) # make N a multiple of 100
    print('Convergence tests, kurtosis, telescoping sum check using N =%7d samples'%n)
    print('    %-15s%-15s%-15s%-15s%-15s%-15s%-15s%s'\
        %('l','ave(Pf-Pc)','ave(Pf)','var(Pf-Pc)','var(Pf)','kurtosis','check','cost'))
    del1 = np.array([])
    del2 = np.array([])
    var1 = np.array([])
    var2 = np.array([])
    kur1 = np.array([])
    chk1 = np.array([])
    cost = np.array([])
    integrand_spawns = integrand.spawn(levels=np.arange(l+1))
    for ll in range(l+1):
        sums = np.zeros(6)
        cst = 0
        integrand_spawn = integrand_spawns[ll]
        for j in range(1,101):
            # evaluate the integrand at the sampling points
            samples = integrand_spawn.discrete_distrib.gen_samples(n=n/100)
            Pc,Pf = integrand_spawn.f(samples)
            dP = Pf-Pc
            sums_j = np.array([
                np.sum(dP),
                np.sum(dP**2),
                np.sum(dP**3),
                np.sum(dP**4),
                np.sum(Pf),
                np.sum(Pf**2),
            ])
            cst_j = integrand_spawn.cost*(n/100)
            sums = sums + sums_j/n
            cst = cst + cst_j/n
        if ll == 0:
            kurt = 0.
        else:
            kurt = ( sums[3] - 4*sums[2]*sums[0] + 6*sums[1]*sums[0]**2 - 
                     3*sums[0]**4 ) / (sums[1]-sums[0]**2)**2.
        cost = np.hstack((cost, cst))
        del1 = np.hstack((del1, sums[0]))
        del2 = np.hstack((del2, sums[4]))
        var1 = np.hstack((var1, sums[1]-sums[0]**2))
        var2 = np.hstack((var2, sums[5]-sums[4]**2))
        var2 = np.maximum(var2, 1e-10) # fix for cases with var=0
        kur1 = np.hstack((kur1, kurt))
        if ll == 0:
            check = 0
        else:
            check = abs( del1[ll] + del2[ll-1] - del2[ll]) / \
                    ( 3.*( np.sqrt(var1[ll]) + np.sqrt(var2[ll-1]) + np.sqrt(var2[ll]) ) / np.sqrt(n))
        chk1 = np.hstack((chk1, check))

        print('    %-15d%-15.4e%-15.4e%-15.3e%-15.3e%-15.2e%-15.2e%.2e'\
              %(ll,del1[ll],del2[ll],var1[ll],var2[ll],kur1[ll],chk1[ll],cst))
    # print out a warning if kurtosis or consistency check looks bad
    if kur1[-1] > 100.:
        print('WARNING: kurtosis on finest level = %f'%kur1[-1])
        print(' indicates MLMC correction dominated by a few rare paths;')
        print(' for information on the connection to variance of sample variances,')
        print(' see http://mathworld.wolfram.com/SampleVarianceDistribution.html\n')
    if np.max(chk1) > 1.:
        print('WARNING: maximum consistency error = %f'%max(chk1))
        print(' indicates identity E[Pf-Pc] = E[Pf] - E[Pc] not satisfied;')
        print(' to be more certain, re-run mlmc_test with larger N\n')
    # use linear regression to estimate alpha, beta and gamma
    l1 = 2
    l2 = l+1
    x = np.ones((l2+1-l1,2))
    x[:,1] = np.arange(l1,l2+1)
    pa = np.linalg.lstsq(x,np.log2(np.absolute(del1[(l1-1):l2])),rcond=None)[0]
    alpha = -pa[1]
    pb = np.linalg.lstsq(x,np.log2(np.absolute(var1[(l1-1):l2])),rcond=None)[0]
    beta = -pb[1]
    pg = np.linalg.lstsq(x,np.log2(np.absolute(cost[(l1-1):l2])),rcond=None)[0]
    gamma = pg[1]
    print('Linear regression estimates of MLMC parameters')
    print('    alpha = %f  (exponent for MLMC weak convergence)'%alpha)
    print('    beta  = %f  (exponent for MLMC variance)'%beta)
    print('    gamma = %f  (exponent for MLMC cost)'%gamma)
    #second, mlmc complexity tests
    print('MLMC complexity tests')
    print('    %-15s%-15s%-15s%-15s%-15s%s'\
        %('rmse_tol','value','mlmc_cost','std_cost','savings','N_l'))
    alpha = np.maximum(alpha,0.5)
    beta  = np.maximum(beta,0.5)
    theta = 0.25
    for i in range(len(rmse_tols)):
        mlmc_qmcpy = qp.CubMLMC(integrand,
            rmse_tol = rmse_tols[i],
            n_init = n_init,
            levels_min = levels_min,
            levels_max = levels_max,
            alpha0 = alpha,
            beta0 = beta,
            gamma0 = gamma)
        sol,data = mlmc_qmcpy.integrate()
        p = data.solution
        nl = data.n_level
        cl = data.cost_per_sample
        mlmc_cost = sum(nl*cl)
        idx = np.minimum(len(var2),len(nl))-1
        std_cost = var2[idx]*cl[-1] / ((1.-theta)*rmse_tols[i]**2)
        print('    %-15.3e%-15.3e%-15.3e%-15.3e%-15.2f%s'\
            %(rmse_tols[i], p, mlmc_cost, std_cost, std_cost/mlmc_cost,''.join('%-13d'%nli for nli in nl)))
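
For context, the regression estimates printed above correspond to the standard MLMC rate assumptions of Giles: for level \(\ell\) with fine/coarse approximations \(P_\ell, P_{\ell-1}\),

\[\lvert \mathbb{E}[P_\ell - P_{\ell-1}] \rvert = \mathcal{O}(2^{-\alpha \ell}), \qquad \mathbb{V}[P_\ell - P_{\ell-1}] = \mathcal{O}(2^{-\beta \ell}), \qquad C_\ell = \mathcal{O}(2^{\gamma \ell}).\]

When \(\beta > \gamma\), the MLMC cost to reach a root-mean-square error \(\varepsilon\) is \(\mathcal{O}(\varepsilon^{-2})\), versus \(\mathcal{O}(\varepsilon^{-2-\gamma/\alpha})\) for standard Monte Carlo; the `savings` column reports the observed ratio `std_cost/mlmc_cost`.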

Shift Invariant Ops

util.bernoulli_poly

\(n^\text{th}\) Bernoulli polynomial
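
For reference, the lowest-order cases are

\[B_1(x) = x - \tfrac{1}{2}, \qquad B_2(x) = x^2 - x + \tfrac{1}{6}, \qquad B_3(x) = x^3 - \tfrac{3}{2}x^2 + \tfrac{1}{2}x,\]

consistent with the \(n=1,2,3\) values in the doctest below (e.g. \(B_1(0) = -0.5\) and \(B_2(0) = 1/6 \approx 0.17\)).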

Examples:

>>> x = np.arange(6).reshape((2,3))/6
>>> available_n = list(BERNOULLIPOLYSDICT.keys())
>>> available_n
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> for n in available_n:
...     y = bernoulli_poly(n,x)
...     with np.printoptions(precision=2):
...         print("n = %d\n%s"%(n,y))
n = 1
[[-0.5  -0.33 -0.17]
 [ 0.    0.17  0.33]]
n = 2
[[ 0.17  0.03 -0.06]
 [-0.08 -0.06  0.03]]
n = 3
[[ 0.    0.05  0.04]
 [ 0.   -0.04 -0.05]]
n = 4
[[-0.03 -0.01  0.02]
 [ 0.03  0.02 -0.01]]
n = 5
[[ 0.00e+00 -2.19e-02 -2.06e-02]
 [ 1.39e-17  2.06e-02  2.19e-02]]
n = 6
[[ 0.02  0.01 -0.01]
 [-0.02 -0.01  0.01]]
n = 7
[[ 0.00e+00  2.28e-02  2.24e-02]
 [-1.39e-17 -2.24e-02 -2.28e-02]]
n = 8
[[-0.03 -0.02  0.02]
 [ 0.03  0.02 -0.02]]
n = 9
[[ 0.   -0.04 -0.04]
 [ 0.    0.04  0.04]]
n = 10
[[ 0.08  0.04 -0.04]
 [-0.08 -0.04  0.04]]
>>> import scipy.special
>>> for n in available_n:
...     bpoly_coeffs = BERNOULLIPOLYSDICT[n].coeffs
...     bpoly_coeffs_true = scipy.special.bernoulli(n)*scipy.special.comb(n,np.arange(n,-1,-1))
...     assert np.allclose(bpoly_coeffs_true,bpoly_coeffs,atol=1e-12)

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `n` | `int` | Polynomial order. | required |
| `x` | `Union[np.ndarray, torch.Tensor]` | Points at which to evaluate the Bernoulli polynomial. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `y` | `Union[np.ndarray, torch.Tensor]` | Bernoulli polynomial values. |

Source code in qmcpy/util/shift_invar_ops.py
def bernoulli_poly(n, x):
    r"""
    $n^\text{th}$ Bernoulli polynomial

    Examples:
        >>> x = np.arange(6).reshape((2,3))/6
        >>> available_n = list(BERNOULLIPOLYSDICT.keys())
        >>> available_n
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        >>> for n in available_n:
        ...     y = bernoulli_poly(n,x)
        ...     with np.printoptions(precision=2):
        ...         print("n = %d\n%s"%(n,y))
        n = 1
        [[-0.5  -0.33 -0.17]
         [ 0.    0.17  0.33]]
        n = 2
        [[ 0.17  0.03 -0.06]
         [-0.08 -0.06  0.03]]
        n = 3
        [[ 0.    0.05  0.04]
         [ 0.   -0.04 -0.05]]
        n = 4
        [[-0.03 -0.01  0.02]
         [ 0.03  0.02 -0.01]]
        n = 5
        [[ 0.00e+00 -2.19e-02 -2.06e-02]
         [ 1.39e-17  2.06e-02  2.19e-02]]
        n = 6
        [[ 0.02  0.01 -0.01]
         [-0.02 -0.01  0.01]]
        n = 7
        [[ 0.00e+00  2.28e-02  2.24e-02]
         [-1.39e-17 -2.24e-02 -2.28e-02]]
        n = 8
        [[-0.03 -0.02  0.02]
         [ 0.03  0.02 -0.02]]
        n = 9
        [[ 0.   -0.04 -0.04]
         [ 0.    0.04  0.04]]
        n = 10
        [[ 0.08  0.04 -0.04]
         [-0.08 -0.04  0.04]]
        >>> import scipy.special
        >>> for n in available_n:
        ...     bpoly_coeffs = BERNOULLIPOLYSDICT[n].coeffs
        ...     bpoly_coeffs_true = scipy.special.bernoulli(n)*scipy.special.comb(n,np.arange(n,-1,-1))
        ...     assert np.allclose(bpoly_coeffs_true,bpoly_coeffs,atol=1e-12)

    Args:
        n (int): Polynomial order.
        x (Union[np.ndarray,torch.Tensor]): Points at which to evaluate the Bernoulli polynomial.

    Returns:
        y (Union[np.ndarray,torch.Tensor]): Bernoulli polynomial values.
    """
    assert isinstance(n,int)
    assert n in BERNOULLIPOLYSDICT, "n = %d not in BERNOULLIPOLYSDICT"%n
    bpoly = BERNOULLIPOLYSDICT[n]
    y = bpoly(x) 
    return y

Digitally Shift Invariant Ops

util.weighted_walsh_funcs

Weighted Walsh functions

\[\sum_{k=0}^\infty \mathrm{wal}_k(x) 2^{-\mu_\alpha(k)}\]

where \(\mathrm{wal}_k\) is the \(k^\text{th}\) Walsh function and \(\mu_\alpha\) is the Dick weight function, which sums the \(\alpha\) largest indices of \(1\) bits in the binary expansion of \(k\); e.g. \(k=13=1101_2\) has 1-bit indexes \((4,3,1)\), so

\[\mu_1(k) = 4, \mu_2(k) = 4+3, \mu_3(k) = 4+3+1 = \mu_4(k) = \mu_5(k) = \dots\]
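
A small helper sketch (hypothetical, not part of `qmcpy`) that makes the weight function concrete:

```python
def mu_alpha(k, alpha):
    """Dick weight: sum of the alpha largest 1-based positions of 1 bits in k."""
    # positions of the 1 bits, counted from the least significant bit
    positions = [a + 1 for a in range(k.bit_length()) if (k >> a) & 1]
    positions.sort(reverse=True)
    return sum(positions[:alpha])

# k = 13 = 1101_2 has 1-bit indexes (4, 3, 1)
assert [mu_alpha(13, alpha) for alpha in (1, 2, 3, 4)] == [4, 7, 8, 8]
```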

Examples:

>>> t = 3 
>>> rng = np.random.Generator(np.random.SFC64(11))
>>> xb = rng.integers(low=0,high=2**t,size=(2,3))
>>> available_alpha = list(WEIGHTEDWALSHFUNCSPOS.keys())
>>> available_alpha
[2, 3, 4]
>>> for alpha in available_alpha:
...     y = weighted_walsh_funcs(alpha,xb,t)
...     with np.printoptions(precision=2):
...         print("alpha = %d\n%s"%(alpha,y))
alpha = 2
[[1.81 1.38 1.81]
 [0.62 1.81 2.5 ]]
alpha = 3
[[1.85 1.43 1.85]
 [0.62 1.85 2.39]]
alpha = 4
[[1.85 1.43 1.85]
 [0.62 1.85 2.38]]
>>> import torch
>>> for alpha in available_alpha:
...     y = weighted_walsh_funcs(alpha,torch.from_numpy(xb),t)
...     with torch._tensor_str.printoptions(precision=2):
...         print("alpha = %d\n%s"%(alpha,y))
alpha = 2
tensor([[1.81, 1.38, 1.81],
        [0.62, 1.81, 2.50]])
alpha = 3
tensor([[1.85, 1.43, 1.85],
        [0.62, 1.85, 2.39]])
alpha = 4
tensor([[1.85, 1.43, 1.85],
        [0.62, 1.85, 2.38]])

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `alpha` | `int` | Weighted Walsh function order. | required |
| `xb` | `Union[np.ndarray, torch.Tensor]` | Integer points at which to evaluate the weighted Walsh function. | required |
| `t` | `int` | Number of bits in each integer in `xb`. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `y` | `Union[np.ndarray, torch.Tensor]` | Weighted Walsh function values. |

References:

  1. Dick, Josef.
    "Walsh spaces containing smooth functions and quasi–Monte Carlo rules of arbitrary high order."
    SIAM Journal on Numerical Analysis 46.3 (2008): 1519-1553.

  2. Dick, Josef.
    "The decay of the Walsh coefficients of smooth functions."
    Bulletin of the Australian Mathematical Society 80.3 (2009): 430-453.

Source code in qmcpy/util/dig_shift_invar_ops.py
def weighted_walsh_funcs(alpha, xb, t):
    r"""
    Weighted Walsh functions 

    $$\sum_{k=0}^\infty \mathrm{wal}_k(x) 2^{-\mu_\alpha(k)}$$ 

    where $\mathrm{wal}_k$ is the $k^\text{th}$ Walsh function 
    and $\mu_\alpha$ is the Dick weight function, which sums the $\alpha$ largest indices of $1$ bits in the binary expansion of $k$; 
    e.g. $k=13=1101_2$ has 1-bit indexes $(4,3,1)$, so 

    $$\mu_1(k) = 4, \mu_2(k) = 4+3, \mu_3(k) = 4+3+1 = \mu_4(k) = \mu_5(k) = \dots$$

    Examples:
        >>> t = 3 
        >>> rng = np.random.Generator(np.random.SFC64(11))
        >>> xb = rng.integers(low=0,high=2**t,size=(2,3))
        >>> available_alpha = list(WEIGHTEDWALSHFUNCSPOS.keys())
        >>> available_alpha
        [2, 3, 4]
        >>> for alpha in available_alpha:
        ...     y = weighted_walsh_funcs(alpha,xb,t)
        ...     with np.printoptions(precision=2):
        ...         print("alpha = %d\n%s"%(alpha,y))
        alpha = 2
        [[1.81 1.38 1.81]
         [0.62 1.81 2.5 ]]
        alpha = 3
        [[1.85 1.43 1.85]
         [0.62 1.85 2.39]]
        alpha = 4
        [[1.85 1.43 1.85]
         [0.62 1.85 2.38]]
        >>> import torch
        >>> for alpha in available_alpha:
        ...     y = weighted_walsh_funcs(alpha,torch.from_numpy(xb),t)
        ...     with torch._tensor_str.printoptions(precision=2):
        ...         print("alpha = %d\n%s"%(alpha,y))
        alpha = 2
        tensor([[1.81, 1.38, 1.81],
                [0.62, 1.81, 2.50]])
        alpha = 3
        tensor([[1.85, 1.43, 1.85],
                [0.62, 1.85, 2.39]])
        alpha = 4
        tensor([[1.85, 1.43, 1.85],
                [0.62, 1.85, 2.38]])

    Args:
        alpha (int): Weighted Walsh function order.
        xb (Union[np.ndarray,torch.Tensor]): Integer points at which to evaluate the weighted Walsh function.
        t (int): Number of bits in each integer in `xb`.

    Returns:
        y (Union[np.ndarray,torch.Tensor]): Weighted Walsh function values.

    **References:**

    1.  Dick, Josef.  
        "Walsh spaces containing smooth functions and quasi–Monte Carlo rules of arbitrary high order."  
        SIAM Journal on Numerical Analysis 46.3 (2008): 1519-1553.

    2.  Dick, Josef.  
        "The decay of the Walsh coefficients of smooth functions."  
        Bulletin of the Australian Mathematical Society 80.3 (2009): 430-453.    
    """
    assert isinstance(alpha,int)
    assert alpha in WEIGHTEDWALSHFUNCSPOS, "alpha = %d not in WEIGHTEDWALSHFUNCSPOS"%alpha
    assert alpha in WEIGHTEDWALSHFUNCSZEROS, "alpha = %d not in WEIGHTEDWALSHFUNCSZEROS"%alpha
    if isinstance(xb,np.ndarray):
        np_or_torch = np 
        y = np.ones(xb.shape) 
    else:
        import torch 
        np_or_torch = torch 
        y = torch.ones(xb.shape,device=xb.device)
    pidxs = xb>0
    y[~pidxs] = WEIGHTEDWALSHFUNCSZEROS[alpha]
    xfpidxs = (2**(-t))*xb[pidxs]
    betapidxs = -np_or_torch.floor(np_or_torch.log2(xfpidxs))
    y[pidxs] = WEIGHTEDWALSHFUNCSPOS[alpha](betapidxs,xfpidxs,xb[pidxs],t)
    return y

util.k4sumterm

\[K_4(x) = \sum_{a=0}^{t-1} \frac{(-1)^{x_a}}{2^{3a}}\]

where \(x_a\) is the bit at index \(a\) in the binary expansion of \(x\) e.g. \(x = 6\) with \(t=3\) has \((x_0,x_1,x_2) = (1,1,0)\)
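
For example, \(x = 6\) with \(t = 3\) gives \(K_4(6) = \frac{(-1)^1}{2^0} + \frac{(-1)^1}{2^3} + \frac{(-1)^0}{2^6} = -1.109375\), which matches the \(-1.11\) entries in the doctest below.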

Examples:

>>> t = 3
>>> rng = np.random.Generator(np.random.SFC64(11))
>>> x = rng.integers(low=0,high=2**t,size=(5,4))
>>> with np.printoptions(precision=2):
...     k4sumterm(x,t)
array([[ 1.11,  0.89,  1.11, -0.89],
       [ 1.11,  1.14,  1.11, -0.86],
       [-0.89,  0.86, -1.11,  0.89],
       [-1.11,  0.89, -0.89,  0.89],
       [-1.14, -0.89, -0.89, -0.86]])
>>> import torch 
>>> with torch._tensor_str.printoptions(precision=2):
...     k4sumterm(torch.from_numpy(x),t)
tensor([[ 1.11,  0.89,  1.11, -0.89],
        [ 1.11,  1.14,  1.11, -0.86],
        [-0.89,  0.86, -1.11,  0.89],
        [-1.11,  0.89, -0.89,  0.89],
        [-1.14, -0.89, -0.89, -0.86]])

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `Union[np.ndarray, torch.Tensor]` | Integer arrays. | required |
| `t` | `int` | Number of bits in each integer. | required |
| `cutoff` | `float` | Terms with factor \(2^{-3a}\) smaller than this threshold are dropped. | `1e-8` |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `y` | `Union[np.ndarray, torch.Tensor]` | The \(K_4\) sum term. |

Source code in qmcpy/util/dig_shift_invar_ops.py
def k4sumterm(x, t, cutoff=1e-8):
    r""" 
    $$K_4(x) = \sum_{a=0}^{t-1} \frac{(-1)^{x_a}}{2^{3a}}$$

    where $x_a$ is the bit at index $a$ in the binary expansion of $x$
    e.g. $x = 6$ with $t=3$ has $(x_0,x_1,x_2) = (1,1,0)$

    Examples:
        >>> t = 3
        >>> rng = np.random.Generator(np.random.SFC64(11))
        >>> x = rng.integers(low=0,high=2**t,size=(5,4))
        >>> with np.printoptions(precision=2):
        ...     k4sumterm(x,t)
        array([[ 1.11,  0.89,  1.11, -0.89],
               [ 1.11,  1.14,  1.11, -0.86],
               [-0.89,  0.86, -1.11,  0.89],
               [-1.11,  0.89, -0.89,  0.89],
               [-1.14, -0.89, -0.89, -0.86]])
        >>> import torch 
        >>> with torch._tensor_str.printoptions(precision=2):
        ...     k4sumterm(torch.from_numpy(x),t)
        tensor([[ 1.11,  0.89,  1.11, -0.89],
                [ 1.11,  1.14,  1.11, -0.86],
                [-0.89,  0.86, -1.11,  0.89],
                [-1.11,  0.89, -0.89,  0.89],
                [-1.14, -0.89, -0.89, -0.86]])

    Args:
        x (Union[np.ndarray,torch.Tensor]): Integer arrays.
        t (int): Number of bits in each integer.
        cutoff (float): Terms with factor $2^{-3a}$ smaller than this threshold are dropped.

    Returns:
        y (Union[np.ndarray,torch.Tensor]): The $K_4$ sum term.
    """
    total = 0.
    for a in range(0,t):
        factor = 1/float(2.**(3*a))
        if factor<cutoff: break # remaining terms are below the cutoff
        total += (-1.)**((x>>(t-a-1))&1)*factor # (-1)^{x_a} with bit a counted from the most significant bit
    return total

util.to_float

Convert binary representations of digital net samples in base \(b=2\) to floating point representations.
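
Each \(t\)-bit integer \(x_b\) maps to the float \(x_f = x_b \, 2^{-t} \in [0,1)\), e.g. \(x_b = 1\) with \(t = 3\) gives \(x_f = 0.125\).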

Examples:

>>> xb = np.arange(8,dtype=np.uint64)
>>> xb 
array([0, 1, 2, 3, 4, 5, 6, 7], dtype=uint64)
>>> to_float(xb,3)
array([0.   , 0.125, 0.25 , 0.375, 0.5  , 0.625, 0.75 , 0.875])
>>> xbtorch = bin_from_numpy_to_torch(xb)
>>> xbtorch
tensor([0, 1, 2, 3, 4, 5, 6, 7])
>>> to_float(xbtorch,3)
tensor([0.0000, 0.1250, 0.2500, 0.3750, 0.5000, 0.6250, 0.7500, 0.8750])

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `Union[np.ndarray, torch.Tensor]` | Binary representation of samples with `dtype` either `np.uint64` or `torch.int64`. | required |
| `t` | `int` | Number of bits in the binary representations. Typically `dnb2.t` where `isinstance(dnb2, DigitalNetB2)`. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `xf` | `Union[np.ndarray, torch.Tensor]` | Floating point representation of samples. |

Source code in qmcpy/util/dig_shift_invar_ops.py
def to_float(x, t):
    r"""
    Convert binary representations of digital net samples in base $b=2$ to floating point representations.

    Examples:
        >>> xb = np.arange(8,dtype=np.uint64)
        >>> xb 
        array([0, 1, 2, 3, 4, 5, 6, 7], dtype=uint64)
        >>> to_float(xb,3)
        array([0.   , 0.125, 0.25 , 0.375, 0.5  , 0.625, 0.75 , 0.875])
        >>> xbtorch = bin_from_numpy_to_torch(xb)
        >>> xbtorch
        tensor([0, 1, 2, 3, 4, 5, 6, 7])
        >>> to_float(xbtorch,3)
        tensor([0.0000, 0.1250, 0.2500, 0.3750, 0.5000, 0.6250, 0.7500, 0.8750])

    Args:
        x (Union[np.ndarray,torch.Tensor]): Binary representation of samples with `dtype` either `np.uint64` or `torch.int64`.
        t (int): Number of bits in the binary representations. Typically `dnb2.t` where `isinstance(dnb2,DigitalNetB2)`.

    Returns: 
        xf (Union[np.ndarray,torch.Tensor]): Floating point representation of samples.  
    """
    npt = get_npt(x)
    if npt==np:
        if x.dtype==np.uint64: 
            return x.astype(np.float64)*2.**(-t)
        elif np.issubdtype(x.dtype,np.floating):
            return x 
        else: 
            raise ParameterError("x.dtype must be np.uint64, got %s"%str(x.dtype))
    else: # npt==torch
        if x.dtype==npt.int64: 
            return x.to(npt.get_default_dtype())*2.**(-t) 
        elif npt.is_floating_point(x):
            return x 
        else:
            raise ParameterError("x.dtype must be torch.int64, got %s"%str(x.dtype))

util.to_bin

Convert floating point representations of digital net samples in base \(b=2\) to binary representations.
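
Each float \(x_f\) maps to the integer \(x_b = \lfloor (x_f \bmod 1) \, 2^t \rfloor\) holding its first \(t\) bits; inputs that are already integers are returned unchanged.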

Examples:

>>> xf = np.random.Generator(np.random.PCG64(7)).uniform(low=0,high=1,size=(5))
>>> xf 
array([0.62509547, 0.8972138 , 0.77568569, 0.22520719, 0.30016628])
>>> xb = to_bin(xf,2)
>>> xb
array([2, 3, 3, 0, 1], dtype=uint64)
>>> to_bin(xb,2) 
array([2, 3, 3, 0, 1], dtype=uint64)
>>> import torch 
>>> xftorch = torch.from_numpy(xf) 
>>> xftorch
tensor([0.6251, 0.8972, 0.7757, 0.2252, 0.3002], dtype=torch.float64)
>>> xbtorch = to_bin(xftorch,2)
>>> xbtorch
tensor([2, 3, 3, 0, 1])
>>> to_bin(xbtorch,2)
tensor([2, 3, 3, 0, 1])

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `x` | `Union[np.ndarray, torch.Tensor]` | Floating point representation of samples. | required |
| `t` | `int` | Number of bits in the binary representations. Typically `dnb2.t` where `isinstance(dnb2, DigitalNetB2)`. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `xb` | `Union[np.ndarray, torch.Tensor]` | Binary representation of samples with `dtype` either `np.uint64` or `torch.int64`. |

Source code in qmcpy/util/dig_shift_invar_ops.py
def to_bin(x, t):
    r"""
    Convert floating point representations of digital net samples in base $b=2$ to binary representations.

    Examples:
        >>> xf = np.random.Generator(np.random.PCG64(7)).uniform(low=0,high=1,size=(5))
        >>> xf 
        array([0.62509547, 0.8972138 , 0.77568569, 0.22520719, 0.30016628])
        >>> xb = to_bin(xf,2)
        >>> xb
        array([2, 3, 3, 0, 1], dtype=uint64)
        >>> to_bin(xb,2) 
        array([2, 3, 3, 0, 1], dtype=uint64)
        >>> import torch 
        >>> xftorch = torch.from_numpy(xf) 
        >>> xftorch
        tensor([0.6251, 0.8972, 0.7757, 0.2252, 0.3002], dtype=torch.float64)
        >>> xbtorch = to_bin(xftorch,2)
        >>> xbtorch
        tensor([2, 3, 3, 0, 1])
        >>> to_bin(xbtorch,2)
        tensor([2, 3, 3, 0, 1])


    Args:
        x (Union[np.ndarray,torch.Tensor]): Floating point representation of samples. 
        t (int): Number of bits in the binary representations. Typically `dnb2.t` where `isinstance(dnb2,DigitalNetB2)`.

    Returns: 
        xb (Union[np.ndarray,torch.Tensor]): Binary representation of samples with `dtype` either `np.uint64` or `torch.int64`. 
    """
    npt = get_npt(x)
    if npt==np:
        if npt.issubdtype(x.dtype,npt.floating):
            return npt.floor((x%1)*2.**t).astype(npt.uint64)
        elif npt.issubdtype(x.dtype,npt.integer):
            return x
        else:
            raise ParameterError("x.dtype must be float or int, got %s"%str(x.dtype))
    else: # npt==torch
        if npt.is_floating_point(x):
            return npt.floor((x%1)*2.**t).to(npt.int64)
        elif (not npt.is_floating_point(x)) and (not npt.is_complex(x)): # int type 
            return x 
        else:
            raise ParameterError("x.dtype must be float or int, got %s"%str(x.dtype))

util.bin_from_numpy_to_torch

Convert numpy.uint64 to torch.int64, useful for converting binary samples from DigitalNetB2 to torch representations.

Examples:

>>> xb = np.arange(8,dtype=np.uint64)
>>> xb 
array([0, 1, 2, 3, 4, 5, 6, 7], dtype=uint64)
>>> bin_from_numpy_to_torch(xb)
tensor([0, 1, 2, 3, 4, 5, 6, 7])

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `xb` | `np.ndarray` | Binary representation of samples with `dtype=np.uint64`. | required |

Returns:

| Name | Type | Description |
| --- | --- | --- |
| `xbtorch` | `torch.Tensor` | Binary representation of samples with `dtype=torch.int64`. |

Source code in qmcpy/util/dig_shift_invar_ops.py
def bin_from_numpy_to_torch(xb):
    r"""
    Convert `numpy.uint64` to `torch.int64`, useful for converting binary samples from `DigitalNetB2` to torch representations.

    Examples:
        >>> xb = np.arange(8,dtype=np.uint64)
        >>> xb 
        array([0, 1, 2, 3, 4, 5, 6, 7], dtype=uint64)
        >>> bin_from_numpy_to_torch(xb)
        tensor([0, 1, 2, 3, 4, 5, 6, 7])

    Args:
        xb (np.ndarray): Binary representation of samples with `dtype=np.uint64`.

    Returns: 
        xbtorch (torch.Tensor): Binary representation of samples with `dtype=torch.int64`.  
    """
    assert xb.dtype==np.uint64
    assert xb.max()<=(2**63-1), "require all xb < 2^63"
    import torch
    return torch.from_numpy(xb.astype(np.int64))
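
A round-trip sketch back to NumPy (the reverse conversion below is an assumption, not a `qmcpy` helper; it is lossless here because the function asserts all values are below \(2^{63}\)):

```python
import numpy as np

# assumes bin_from_numpy_to_torch is in scope, e.g. imported from qmcpy
xb = np.arange(8, dtype=np.uint64)
xbtorch = bin_from_numpy_to_torch(xb)
# reverse conversion: torch.int64 -> np.uint64
xb_back = xbtorch.numpy().astype(np.uint64)
assert np.array_equal(xb, xb_back)
```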