
Simple Hamiltonian Monte Carlo Example with TensorFlow Probability's Edward2

Edward example

As Edward is deprecated and requires an older version of TensorFlow, one can create a dedicated virtual environment for the following example:

$ python3 --version
Python 3.6.8
$ python3 -m venv edward
$ source edward/bin/activate
(edward) $ pip3 install --upgrade pip setuptools wheel
(edward) $ cat edward.txt
tensorflow==1.7
edward~=1.3
scipy~=1.2
pandas~=0.24
matplotlib~=3.0
(edward) $ pip3 install -r edward.txt

I have a very simple minimal working example of using Hamiltonian Monte Carlo with Edward, called edward_old.py:

#!/usr/bin/env python3

import numpy as np
import scipy.stats
import tensorflow as tf
import edward as ed
import pandas as pd
import matplotlib.pyplot as plt


def generate_samples(data, n_samples):
    # Pick initial point for MCMC chains based on the data
    low, med, high = np.percentile(data, (16, 50, 84))
    mu_init = np.float32(med)
    t_init = np.float32(np.log(0.5 * (high - low)))

    # Build a very simple model
    mu = ed.models.Uniform(-1.0, 1.0)
    t = ed.models.Uniform(*np.log((0.05, 1.0), dtype=np.float32))
    X = ed.models.Normal(
        loc=tf.fill(data.shape, mu), scale=tf.fill(data.shape, tf.exp(t))
    )

    # Empirical samples of a scalar
    q_mu = ed.models.Empirical(params=tf.Variable(tf.fill((n_samples,), mu_init)))
    q_t = ed.models.Empirical(params=tf.Variable(tf.fill((n_samples,), t_init)))

    # Run inference using HMC to generate samples.
    with tf.Session() as sess:
        inference = ed.HMC({mu: q_mu, t: q_t}, data={X: data})
        inference.run(step_size=0.01, n_steps=10)
        mu_samples, t_samples = sess.run([q_mu.params, q_t.params])
    return mu_samples, t_samples


def visualize(samples, mu_grid, sigma_grid):
    fig, ax = plt.subplots(1, 1, figsize=(6, 5))
    ax.scatter(samples['mu'], samples['sigma'], s=5, lw=0, c='black')
    ax.set_xlim(mu_grid[0], mu_grid[-1])
    ax.set_ylim(sigma_grid[0], sigma_grid[-1])
    ax.set_title('Edward')
    ax.set_xlabel(r'$\mu$')
    ax.set_ylabel(r'$\sigma$')
    plt.savefig('edward_old.pdf')


def main():
    np.random.seed(0)
    tf.set_random_seed(0)

    # Generate pseudodata from draws from a single normal distribution
    dist_mean = 0.0
    dist_std = 0.5
    n_events = 5000
    toy_data = scipy.stats.norm.rvs(dist_mean, dist_std, size=n_events)

    mu_samples, t_samples = generate_samples(toy_data, n_events)
    samples = pd.DataFrame({'mu': mu_samples, 'sigma': np.exp(t_samples)})

    n_grid = 50
    mu_grid = np.linspace(*np.percentile(mu_samples, (0.5, 99.5)), n_grid)
    sigma_grid = np.linspace(*np.exp(np.percentile(t_samples, (0.5, 99.5))), n_grid)
    visualize(samples, mu_grid, sigma_grid)


if __name__ == '__main__':
    main()

which produces the plot below when run with

(edward) $ python3 edward_old.py

(figure: scatter plot of the mu and sigma samples produced by Edward)

Edward2 example

However, when I try to replicate it using TensorFlow Probability and Edward2 with the following environment

$ python3 --version
Python 3.6.8
$ python3 -m venv tfp-edward2
$ source tfp-edward2/bin/activate
(tfp-edward2) $ pip3 install --upgrade pip setuptools wheel
(tfp-edward2) $ cat tfp-edward2.txt
tensorflow~=1.13
tensorflow-probability~=0.6
scipy~=1.2
pandas~=0.24
matplotlib~=3.0
(tfp-edward2) $ pip3 install -r tfp-edward2.txt

and the following changes to edward_old.py's generate_samples, in a file called edward2.py:

#!/usr/bin/env python3

import numpy as np
import scipy.stats
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
import pandas as pd
import matplotlib.pyplot as plt


def generate_samples(data, n_samples):
    # Pick initial point for MCMC chains based on the data
    low, med, high = np.percentile(data, (16, 50, 84))
    mu_init = np.float32(med)
    t_init = np.float32(np.log(0.5 * (high - low)))

    def model(data_shape):
        mu = ed.Uniform(
            low=tf.fill(data_shape, -1.0), high=tf.fill(data_shape, 1.0), name="mu"
        )
        t = ed.Uniform(
            low=tf.log(tf.fill(data_shape, 0.05)),
            high=tf.log(tf.fill(data_shape, 1.0)),
            name="t",
        )
        x = ed.Normal(loc=mu, scale=tf.exp(t), name="x")
        return x

    log_joint = ed.make_log_joint_fn(model)

    def target_log_prob_fn(mu, t):
        """Target log-probability as a function of states."""
        return log_joint(data.shape, mu=mu, t=t, x=data)

    step_size = tf.get_variable(
        name='step_size',
        initializer=0.01,
        use_resource=True,  # For TFE compatibility
        trainable=False,
    )

    num_burnin_steps = 1000

    hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=target_log_prob_fn,
        num_leapfrog_steps=5,
        step_size=step_size,
        step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(
            num_adaptation_steps=int(num_burnin_steps * 0.8)
        ),
    )

    # How should these be done?
    q_mu = tf.random_normal(data.shape, mean=mu_init)
    q_t = tf.random_normal(data.shape, mean=t_init)

    states, kernel_results = tfp.mcmc.sample_chain(
        num_results=n_samples,
        current_state=[q_mu, q_t],
        kernel=hmc_kernel,
        num_burnin_steps=num_burnin_steps,
    )

    # Initialize all constructed variables.
    init_op = tf.global_variables_initializer()

    # Run the inference using HMC to generate samples
    with tf.Session() as sess:
        init_op.run()
        states_, results_ = sess.run([states, kernel_results])

    mu_samples, t_samples = states_[0][0], states_[1][0]
    return mu_samples, t_samples

Running with

(tfp-edward2) $ python3 edward2.py

shows that there are some obvious problems. I don't think I am formulating the equivalent of ed.models.Empirical correctly, so any thoughts on that, or on anything else I am doing wrong, would be great.

I have already tried to follow the "Upgrading from Edward to Edward2" examples, but I haven't been able to understand them well enough to transfer from the deep_exponential_family model used there to this example.

The problem I created for myself was totally messing up the shapes of my distributions. What I failed to properly grasp at first was that the current_state of my tfp.mcmc.sample_chain should have been scalars (shape==()) representing the initial positions of the chains. Once I realized this, it became clear that those positions, q_mu and q_t, had quite the wrong shape and should instead be the means of samples drawn around the positions determined from the data:

q_mu = tf.reduce_mean(tf.random_normal((1000,), mean=mu_init))
q_t = tf.reduce_mean(tf.random_normal((1000,), mean=t_init))
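
Since the mean of many draws centered on mu_init is itself only a noisy estimate of mu_init, I suspect (though I haven't tested it) that starting the chains directly at the data-derived points would work just as well:

# Untested simplification: start each chain exactly at the data-derived
# point instead of at a noisy average around it.
q_mu = tf.constant(mu_init)
q_t = tf.constant(t_init)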

As these values are scalars, I had been creating the shapes of my model wrong as well. I had been creating samples of my random variables that were the same shape as my data, mistakenly thinking that this just moved the shaping from x to mu and t. Of course mu and t are meant to be scalar random variables from their respective Uniform distributions, which then serve as the parameters for x's Normal distribution, from which data.shape samples are drawn:

def model(data_shape):
    mu = ed.Uniform(low=-1.0, high=1.0, name="mu")
    t = ed.Uniform(low=tf.log(0.05), high=tf.log(1.0), name="t")
    x = ed.Normal(
        loc=tf.fill(data_shape, mu), scale=tf.fill(data_shape, tf.exp(t)), name="x"
    )
    return x
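
For what it's worth, Edward2 random variable constructors also accept a sample_shape argument, which, if I am reading the API correctly, expresses the same model without the tf.fill calls:

# Possible equivalent using sample_shape (my reading of the Edward2 API,
# not verified against this exact TensorFlow Probability version):
def model(data_shape):
    mu = ed.Uniform(low=-1.0, high=1.0, name="mu")
    t = ed.Uniform(low=tf.log(0.05), high=tf.log(1.0), name="t")
    x = ed.Normal(loc=mu, scale=tf.exp(t), sample_shape=data_shape, name="x")
    return x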

Once this is done, the only thing left to do is to properly access the states:

with tf.Session() as sess:
    init_op.run()
    states_, results_ = sess.run([states, kernel_results])
    mu_samples, t_samples = (states_[0], states_[1])

and that produces the image below with

(tfp-edward2) $ python3 edward2.py

which is a good match for the original using Edward.

(figure: scatter plot of the mu and sigma samples produced by TFP and Edward2)
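
As a quick sanity check on the chains, the kernel results returned alongside the states can be inspected for the acceptance fraction. Assuming the kernel-results layout I remember from TensorFlow Probability 0.6, something like the following should work on the results_ fetched above:

# Rough diagnostic, assuming the Metropolis-Hastings kernel results
# expose an is_accepted field (as I believe they do in TFP 0.6).
acceptance_rate = results_.is_accepted.mean()
print('acceptance rate: {}'.format(acceptance_rate))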

The fully corrected script is below:

#!/usr/bin/env python3

import numpy as np
import scipy.stats
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
import pandas as pd
import matplotlib.pyplot as plt


def generate_samples(data, n_samples):
    # Pick initial point for MCMC chains based on the data
    low, med, high = np.percentile(data, (16, 50, 84))
    mu_init = np.float32(med)
    t_init = np.float32(np.log(0.5 * (high - low)))

    def model(data_shape):
        mu = ed.Uniform(low=-1.0, high=1.0, name="mu")
        t = ed.Uniform(low=tf.log(0.05), high=tf.log(1.0), name="t")
        x = ed.Normal(
            loc=tf.fill(data_shape, mu), scale=tf.fill(data_shape, tf.exp(t)), name="x"
        )
        return x

    log_joint = ed.make_log_joint_fn(model)

    def target_log_prob_fn(mu, t):
        """Target log-probability as a function of states."""
        return log_joint(data.shape, mu=mu, t=t, x=data)

    step_size = tf.get_variable(
        name='step_size',
        initializer=0.01,
        use_resource=True,  # For TFE compatibility
        trainable=False,
    )

    num_burnin_steps = 1000

    hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=target_log_prob_fn,
        num_leapfrog_steps=5,
        step_size=step_size,
        step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(
            num_adaptation_steps=int(num_burnin_steps * 0.8)
        ),
    )

    # Initial states of chains
    q_mu = tf.reduce_mean(tf.random_normal((1000,), mean=mu_init))
    q_t = tf.reduce_mean(tf.random_normal((1000,), mean=t_init))

    states, kernel_results = tfp.mcmc.sample_chain(
        num_results=n_samples,
        current_state=[q_mu, q_t],
        kernel=hmc_kernel,
        num_burnin_steps=num_burnin_steps,
    )

    # Initialize all constructed variables.
    init_op = tf.global_variables_initializer()

    # Run the inference using HMC to generate samples
    with tf.Session() as sess:
        init_op.run()
        states_, results_ = sess.run([states, kernel_results])
        mu_samples, t_samples = (states_[0], states_[1])

    return mu_samples, t_samples


def visualize(samples, mu_grid, sigma_grid):
    fig, ax = plt.subplots(1, 1, figsize=(6, 5))
    ax.scatter(samples['mu'], samples['sigma'], s=5, lw=0, c='black')
    ax.set_xlim(mu_grid[0], mu_grid[-1])
    ax.set_ylim(sigma_grid[0], sigma_grid[-1])
    ax.set_title('tfp and Edward2')
    ax.set_xlabel(r'$\mu$')
    ax.set_ylabel(r'$\sigma$')
    plt.savefig('tfp-edward2.pdf')
    plt.savefig('tfp-edward2.png')


def main():
    np.random.seed(0)
    tf.set_random_seed(0)

    # Generate pseudodata from draws from a single normal distribution
    dist_mean = 0.0
    dist_std = 0.5
    n_events = 5000
    toy_data = scipy.stats.norm.rvs(dist_mean, dist_std, size=n_events)

    mu_samples, t_samples = generate_samples(toy_data, n_events)
    samples = pd.DataFrame({'mu': mu_samples, 'sigma': np.exp(t_samples)})

    n_grid = 50
    mu_grid = np.linspace(*np.percentile(mu_samples, (0.5, 99.5)), n_grid)
    sigma_grid = np.linspace(*np.exp(np.percentile(t_samples, (0.5, 99.5))), n_grid)
    visualize(samples, mu_grid, sigma_grid)


if __name__ == '__main__':
    main()
