We can learn the weights of an SPN for generative modeling using hard EM learning.
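Hard EM treats each training sample as taking a single path through the network: an upward pass computes node values, a downward MPE (max) pass selects one winning child per sum node along that path, and the winner's count accumulator is incremented. The weights are then simply the normalized counts. In generic SPN notation (this is the standard hard-EM rule, not LibSPN-specific notation):

$$
c_{ij} \leftarrow c_{ij} + \mathbb{1}\!\left[\text{child } j \text{ wins at sum node } i\right],
\qquad
w_{ij} = \frac{c_{ij}}{\sum_k c_{ik}}
$$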
import libspn as spn
import tensorflow as tf

indicator_leaves = spn.IndicatorLeaf(
    num_vars=2, num_vals=2, name="indicator_x")

# Generate a random structure with 1 decomposition per product layer,
# 2 variable subsets per product (so 2 children), and 2 sums/mixtures per scope
dense_spn_generator = spn.DenseSPNGenerator(num_decomps=1, num_subsets=2, num_mixtures=2)
root = dense_spn_generator.generate(indicator_leaves)

# Connect a latent indicator to the root (this node can also be added manually)
indicator_y = root.generate_latent_indicators(name="indicator_y")

# Generate randomly initialized weights for all sum nodes (these can also be added manually)
spn.generate_weights(root, initializer=tf.initializers.random_uniform())
The visualization below uses graphviz. Depending on your setup (e.g. jupyter lab vs. jupyter notebook) it might fail to show. At least Chrome + jupyter notebook seems to work.
# Visualize SPN graph
spn.display_spn_graph(root)
# 8 training samples for the two observed variables
indicator_x_data = [[0, 0], [0, 0], [1, 1], [1, 1], [1, 1], [0, 1], [0, 1], [0, 1]]
# Feeding -1 leaves the latent indicator unobserved, so it is marginalized out
indicator_y_data = [[-1]] * len(indicator_x_data)
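Before training, it helps to know what the model should converge to. A quick tally of the empirical distribution of the training rows (plain Python, independent of LibSPN):

from collections import Counter

# Tally the empirical distribution of the 8 training rows; hard EM
# should push the SPN's marginal likelihoods toward these frequencies.
empirical = Counter(map(tuple, indicator_x_data))
for config, count in sorted(empirical.items()):
    print(config, count / len(indicator_x_data))
# Prints (0, 0) 0.25, (0, 1) 0.375, (1, 1) 0.375; (1, 0) never occurs.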
hard_em_learning = spn.HardEMLearning(root=root)
update_op = hard_em_learning.accumulate_and_update_weights()
# Per-sample log-likelihoods at the root, and their batch average
log_value_op = root.get_log_value()
llh_op = tf.reduce_mean(log_value_op)
num_epochs = 20

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    likelihoods = []
    for epoch in range(num_epochs):
        likelihood, _ = sess.run(
            [llh_op, update_op],
            feed_dict={indicator_leaves: indicator_x_data, indicator_y: indicator_y_data}
        )
        likelihoods.append(likelihood)
        print("Avg. log-likelihood: %s" % (likelihood,))
%matplotlib inline
import matplotlib.pyplot as plt

plt.style.use('ggplot')
# Learning curve: average log-likelihood per epoch
plt.plot(likelihoods)
plt.xlabel("Epoch")
plt.ylabel("Avg. log-likelihood")
plt.show()