├── .idea
├── misc.xml
└── vcs.xml
├── README.md
├── compute_ot.py
├── costs.py
├── data_generator.py
├── estimate_map.py
├── losses.py
├── models.py
├── run.sh
├── visualize_map.py
└── viz
├── entropy
├── Fx.png
├── XnFx.png
└── XnY.png
└── l2
├── Fx.png
├── XnFx.png
└── XnY.png
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## large-scale-OT-mapping-TF
2 |
3 | Tensorflow Implementation of the following paper:
4 | ```
5 | Title:
6 | Large-Scale Optimal Transport and Mapping Estimation
7 | Authors:
8 | Seguy, Vivien; Bhushan Damodaran, Bharath; Flamary, Rémi; Courty, Nicolas; Rolet, Antoine; Blondel, Mathieu
9 | Publication:
10 | eprint arXiv:1711.02283
11 | Publication Date:
12 | 11/2017
13 | Origin:
14 | ARXIV
15 | Keywords:
16 | Statistics - Machine Learning
17 | Comment:
18 | 10 pages, 4 figures
19 | Bibliographic Code:
20 | 2017arXiv171102283S
21 | ```
22 | [on arXiv](https://arxiv.org/abs/1711.02283)
23 |
24 | [on OpenReview](https://openreview.net/forum?id=B1zlp1bRW)
25 |
26 | ### Some notes
27 |
28 | - This repository does not contain an implementation of the entire experiment of the paper. Instead,
29 | it demonstrates the paper's core algorithm on a small toy example.
30 |
31 | - Unlike the original paper, full batch-wise optimization is not implemented, but I believe this makes little difference.
32 |
33 | - To run experiments, run `run.sh`.
34 |
35 | - L2 regularization generally looks better than entropic regularization.
36 |
37 | - Epsilon is a quite sensitive and important hyper-parameter. In my toy example, `eps = 0.01` looks like a reasonable choice.
38 |
39 | ### Requirements
40 | ```
41 | python3
42 | tensorflow
43 | matplotlib
44 | seaborn
45 | ...
46 | ```
47 |
48 | ## Results (on L2 regularization)
49 |
50 | ##### Source and Target
51 | 
52 |
53 | Source points are green and target points are red.
54 |
55 | ##### Monge Map Estimation
56 | 
57 |
58 | Source points are green and transported points are blue.
59 |
60 | ##### KDE on transported distribution
61 | 
62 |
63 | #### Author
64 | @mikigom (Junghoon Seo, Satrec Initiative)
65 |
66 | sjh@satreci.com
67 |
--------------------------------------------------------------------------------
/compute_ot.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import tensorflow as tf
4 |
5 | import data_generator
6 | from models import NN_DUAL
7 | from losses import reg_ot_dual
8 |
# Command-line flags controlling the dual-potential training run.
flags = tf.app.flags
flags.DEFINE_integer("n_epoch", 20000, "Epoch to train [20000]")
flags.DEFINE_integer("n_batch_size", 512, "Batch size to train [512]")
flags.DEFINE_string("reg_type", 'l2', "Regularization Type")
flags.DEFINE_float("learning_rate", 0.005, "Learning rate of optimizer [0.005]")
FLAGS = flags.FLAGS
15 |
16 |
class Trainer(object):
    """Learns the dual potentials (u, v) of the regularized OT problem.

    Each step draws a fresh batch from the source (single Gaussian) and
    target (four-Gaussian mixture) distributions and ascends the
    regularized dual objective with one Adam step per network. The
    trained potentials are checkpointed for estimate_map.py to restore.
    """

    def __init__(self):
        # Attributes are declared up-front and populated by the define_* calls below.
        self.x_generator = None
        self.y_generator = None
        self.x = None
        self.y = None
        self.u = None
        self.v = None
        self.ux = None
        self.vy = None
        self.u_var_list = None
        self.v_var_list = None
        self.loss = None
        self.step = None
        self.step_inc = None
        self.u_opt = None
        self.v_opt = None
        self.ckpt_dir = 'ckpts/stochastic_ot_computation/'
        self.summary_writer = None
        self.summary_op = None
        self.saver = None
        self.sess = None
        self.coord = None
        self.threads = None

        # Graph construction order matters: loss needs the models, the
        # summary op needs the optimizers, the saver needs all variables.
        self.define_dataset()
        self.define_model()
        self.define_loss()
        self.define_optim()
        self.define_writer_and_summary()
        self.define_saver()
        self.initialize_session_and_etc()

    def define_dataset(self):
        """Create infinite batch generators and the graph's input placeholders."""
        self.x_generator = iter(data_generator.GeneratorGaussian1(FLAGS.n_batch_size))
        self.y_generator = iter(data_generator.GeneratorGaussians4(FLAGS.n_batch_size))
        self.x = tf.placeholder(tf.float32, (None, 2))
        self.y = tf.placeholder(tf.float32, (None, 2))

    def define_model(self):
        """Build the two dual-potential networks u(x) and v(y)."""
        self.u = NN_DUAL(self.x, 'u')
        self.v = NN_DUAL(self.y, 'v')
        self.ux = self.u.output
        self.vy = self.v.output
        self.u_var_list = self.u.var_list
        self.v_var_list = self.v.var_list

    def define_loss(self):
        """Batch mean of the regularized dual objective (to be maximized)."""
        self.loss = tf.reduce_mean(reg_ot_dual(self.ux, self.vy, self.x, self.y, reg_type=FLAGS.reg_type))

    def define_optim(self):
        """Set up the global step counter and one Adam ascent op per network."""
        self.step = tf.Variable(0, name='step', trainable=False)
        self.step_inc = tf.assign(self.step, self.step + 1)

        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        # Gradient Ascent
        self.u_opt = optimizer.minimize(-self.loss, var_list=self.u_var_list)
        self.v_opt = optimizer.minimize(-self.loss, var_list=self.v_var_list)

    def define_writer_and_summary(self):
        """Create the checkpoint dir, summary writer, and the loss summary op.

        The summary op is placed under control dependencies on both
        optimizer ops, so evaluating it in train() also performs one
        optimization step for u and v.
        """
        if not os.path.exists(self.ckpt_dir):
            os.makedirs(self.ckpt_dir)

        self.summary_writer = tf.summary.FileWriter(self.ckpt_dir)

        with tf.control_dependencies([self.u_opt, self.v_opt]):
            self.summary_op = tf.summary.merge([
                tf.summary.scalar('loss', self.loss)
            ])

    def define_saver(self):
        # Saves every variable, including both potentials and Adam slots.
        self.saver = tf.train.Saver()

    def initialize_session_and_etc(self):
        """Start a GPU-friendly session, initialize variables, launch queue runners."""
        gpu_options = tf.GPUOptions(allow_growth=True)
        sess_config = tf.ConfigProto(allow_soft_placement=True,
                                     gpu_options=gpu_options)
        self.sess = tf.Session(config=sess_config)

        self.sess.run(tf.local_variables_initializer())
        self.sess.run(tf.global_variables_initializer())

        self.coord = tf.train.Coordinator()
        self.threads = tf.train.start_queue_runners(sess=self.sess, coord=self.coord)

    def train(self):
        """Run up to n_epoch optimization steps, then checkpoint and shut down."""
        try:
            step = None
            while not self.coord.should_stop():
                step = self.sess.run(self.step)
                if step > FLAGS.n_epoch:
                    break

                x = next(self.x_generator)
                y = next(self.y_generator)

                # Evaluating summary_op also runs u_opt/v_opt via control deps.
                summary = self.sess.run(self.summary_op, feed_dict={self.x: x, self.y: y})
                self.summary_writer.add_summary(summary, step)
                self.summary_writer.flush()

                self.sess.run(self.step_inc)

        except KeyboardInterrupt:
            print("Interrupted!")
            self.coord.request_stop()

        finally:
            # Always persist the potentials so estimate_map.py can restore them.
            self.saver.save(self.sess, self.ckpt_dir)
            print('Stop')
            self.coord.request_stop()
            self.coord.join(self.threads)
128 |
129 |
if __name__ == '__main__':
    # Train the dual potentials and write checkpoints for estimate_map.py.
    trainer = Trainer()
    trainer.train()
    print("Done!")
134 |
--------------------------------------------------------------------------------
/costs.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 |
def cost_l2(x, y):
    """Row-wise squared Euclidean distance between paired points in x and y."""
    diff = x - y
    return tf.reduce_sum(diff * diff, axis=1)
6 |
--------------------------------------------------------------------------------
/data_generator.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import random
3 |
4 |
class GeneratorGaussians4(object):
    """Infinite generator of 2-D batches from a four-Gaussian mixture.

    The four component centers sit at the corners of a square: the corner
    coordinates are normalized by the diagonal length and multiplied by
    `scale`. Each sample is a small isotropic Gaussian blob (stdev 0.02)
    around a uniformly chosen corner; the whole batch is divided by `stdev`.
    """

    def __init__(self,
                 batch_size: int = 256,
                 scale: float = 2.,
                 center_coor_min: float = -0.25,
                 center_coor_max: float = +0.25,
                 stdev: float = 1.414):
        self.batch_size = batch_size
        self.stdev = stdev
        # Normalize corner coordinates to unit diagonal, then apply scale.
        # (Removed a dead `scale = scale` self-assignment from the original.)
        diag_len = np.sqrt(center_coor_min ** 2 + center_coor_max ** 2)
        centers = [
            (center_coor_max / diag_len, center_coor_max / diag_len),
            (center_coor_max / diag_len, center_coor_min / diag_len),
            (center_coor_min / diag_len, center_coor_max / diag_len),
            (center_coor_min / diag_len, center_coor_min / diag_len)
        ]
        self.centers = [(scale * x, scale * y) for x, y in centers]

    def __iter__(self):
        # Yields float32 arrays of shape (batch_size, 2), forever.
        while True:
            dataset = []
            for _ in range(self.batch_size):
                point = np.random.randn(2) * .02
                center = random.choice(self.centers)
                point[0] += center[0]
                point[1] += center[1]
                dataset.append(point)
            dataset = np.array(dataset, dtype='float32')
            dataset /= self.stdev
            yield dataset
36 |
37 |
class GeneratorGaussian1(object):
    """Endless source of 2-D samples from one isotropic Gaussian at the origin."""

    def __init__(self,
                 batch_size=256):
        self.batch_size = batch_size

    def __iter__(self):
        mean = (0, 0)
        cov = ((0.15, 0), (0, 0.15))  # diagonal covariance, variance 0.15 per axis
        while True:
            yield np.random.multivariate_normal(mean, cov, self.batch_size)
46 |
--------------------------------------------------------------------------------
/estimate_map.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import tensorflow as tf
4 |
5 | import data_generator
6 | from models import NN_DUAL, NN_MAP
7 | from losses import primer_dual
8 |
# Command-line flags controlling the map-estimation training run.
flags = tf.app.flags
flags.DEFINE_integer("n_epoch", 20000, "Epoch to train [20000]")
flags.DEFINE_integer("n_batch_size", 512, "Batch size to train [512]")
flags.DEFINE_string("reg_type", 'l2', "Regularization Type")
flags.DEFINE_float("learning_rate", 0.005, "Learning rate of optimizer [0.005]")
FLAGS = flags.FLAGS
15 |
16 |
class Trainer(object):
    """Fits the Monge map f by regression against the learned OT plan.

    Restores the dual potentials (u, v) trained by compute_ot.py, keeps
    them fixed, and trains the map network f to minimize the barycentric
    projection loss (`primer_dual`). The trained map is checkpointed for
    visualize_map.py.
    """

    def __init__(self):
        # Attributes are declared up-front and populated by the define_* calls below.
        self.x_generator = None
        self.y_generator = None
        self.x = None
        self.y = None
        self.u = None
        self.v = None
        self.ux = None
        self.vy = None
        self.f = None
        self.fx = None
        self.u_var_list = None
        self.v_var_list = None
        self.f_var_list = None
        self.loss = None
        self.step = None
        self.step_inc = None
        self.f_opt = None
        self.ckpt_dir_ot = 'ckpts/stochastic_ot_computation/'
        self.ckpt_dir_map = 'ckpts/optimal_map_estimation/'
        self.summary_writer = None
        self.summary_op = None
        self.u_saver = None
        self.v_saver = None
        self.f_saver = None
        self.sess = None
        self.coord = None
        self.threads = None

        self.define_dataset()
        self.define_model()
        self.define_loss()
        self.define_optim()
        self.define_writer_and_summary()
        self.define_saver()
        self.initialize_session_and_etc()

    def define_dataset(self):
        """Create infinite batch generators and the graph's input placeholders."""
        self.x_generator = iter(data_generator.GeneratorGaussian1(FLAGS.n_batch_size))
        self.y_generator = iter(data_generator.GeneratorGaussians4(FLAGS.n_batch_size))
        self.x = tf.placeholder(tf.float32, (None, 2))
        self.y = tf.placeholder(tf.float32, (None, 2))

    def define_model(self):
        """Rebuild the dual networks (for restoring) and build the map network f."""
        # Scope names 'u' and 'v' must match compute_ot.py so the savers
        # can restore the trained potentials.
        self.u = NN_DUAL(self.x, 'u')
        self.v = NN_DUAL(self.y, 'v')
        self.ux = self.u.output
        self.vy = self.v.output
        self.u_var_list = self.u.var_list
        self.v_var_list = self.v.var_list

        self.f = NN_MAP(self.x, 'f')
        self.fx = self.f.output
        self.f_var_list = self.f.var_list

    def define_loss(self):
        """Batch mean of the barycentric-projection objective (to be minimized)."""
        self.loss = tf.reduce_mean(primer_dual(self.fx, self.ux, self.vy, self.x, self.y, reg_type=FLAGS.reg_type))

    def define_optim(self):
        """Set up the global step counter and Adam over the map variables only."""
        self.step = tf.Variable(0, name='step', trainable=False)
        self.step_inc = tf.assign(self.step, self.step + 1)

        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        # Only f is trained here; u and v stay fixed at their restored values.
        self.f_opt = optimizer.minimize(self.loss, var_list=self.f_var_list)

    def define_writer_and_summary(self):
        """Create checkpoint dirs, the summary writer, and the loss summary op.

        The summary op depends on f_opt, so evaluating it in train() also
        performs one optimization step for f.
        """
        if not os.path.exists(self.ckpt_dir_ot):
            os.makedirs(self.ckpt_dir_ot)

        if not os.path.exists(self.ckpt_dir_map):
            os.makedirs(self.ckpt_dir_map)

        self.summary_writer = tf.summary.FileWriter(self.ckpt_dir_map)

        with tf.control_dependencies([self.f_opt]):
            self.summary_op = tf.summary.merge([
                tf.summary.scalar('loss', self.loss)
            ])

    def define_saver(self):
        # Separate savers: u/v are restored from the OT run, f is saved here.
        self.u_saver = tf.train.Saver(self.u_var_list)
        self.v_saver = tf.train.Saver(self.v_var_list)
        self.f_saver = tf.train.Saver(self.f_var_list)

    def initialize_session_and_etc(self):
        """Start a session, init variables, restore u/v, launch queue runners."""
        gpu_options = tf.GPUOptions(allow_growth=True)
        sess_config = tf.ConfigProto(allow_soft_placement=True,
                                     gpu_options=gpu_options)
        self.sess = tf.Session(config=sess_config)

        self.sess.run(tf.local_variables_initializer())
        self.sess.run(tf.global_variables_initializer())

        # Restore overwrites the random initialization of u and v.
        self.u_saver.restore(self.sess, self.ckpt_dir_ot)
        self.v_saver.restore(self.sess, self.ckpt_dir_ot)

        self.coord = tf.train.Coordinator()
        self.threads = tf.train.start_queue_runners(sess=self.sess, coord=self.coord)

    def train(self):
        """Run up to n_epoch optimization steps, then checkpoint and shut down."""
        try:
            step = None
            while not self.coord.should_stop():
                step = self.sess.run(self.step)
                if step > FLAGS.n_epoch:
                    break

                x = next(self.x_generator)
                y = next(self.y_generator)

                # Evaluating summary_op also runs f_opt via control deps.
                summary = self.sess.run(self.summary_op, feed_dict={self.x: x, self.y: y})
                self.summary_writer.add_summary(summary, step)
                self.summary_writer.flush()

                self.sess.run(self.step_inc)

        except KeyboardInterrupt:
            print("Interrupted!")
            self.coord.request_stop()

        finally:
            # Always persist the map so visualize_map.py can restore it.
            self.f_saver.save(self.sess, self.ckpt_dir_map)
            print('Stop')
            self.coord.request_stop()
            self.coord.join(self.threads)
143 |
144 |
if __name__ == '__main__':
    # Train the Monge map f on top of the potentials from compute_ot.py.
    trainer = Trainer()
    trainer.train()
    print("Done!")
149 |
--------------------------------------------------------------------------------
/losses.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from costs import cost_l2
3 |
# Default regularization strength; the README notes eps = 0.01 works well on the toy data.
__eps__ = .01
5 |
6 |
def F_entropy(ux, vy, x, y, c=cost_l2, eps=__eps__):
    """Entropic-regularization penalty term of the dual OT objective."""
    slack = ux + vy - c(x, y)
    return -eps * tf.exp(slack / eps)
9 |
10 |
def H_entropy(ux, vy, x, y, c=cost_l2, eps=__eps__):
    """Density of the entropic-regularized plan w.r.t. the product measure."""
    slack = ux + vy - c(x, y)
    return tf.exp(slack / eps)
13 |
14 |
def F_l2(ux, vy, x, y, c=cost_l2, eps=__eps__):
    """L2-regularization penalty term of the dual OT objective."""
    positive_slack = tf.nn.relu(ux + vy - c(x, y))
    return (-1 / (4 * eps)) * positive_slack ** 2
17 |
18 |
def H_l2(ux, vy, x, y, c=cost_l2, eps=__eps__):
    """Density of the L2-regularized plan w.r.t. the product measure."""
    positive_slack = tf.nn.relu(ux + vy - c(x, y))
    return (1 / (2 * eps)) * positive_slack
21 |
22 |
def reg_ot_dual(ux, vy, x, y, c=cost_l2, eps=__eps__, reg_type='entropy'):
    """Regularized dual OT objective for potentials u(x), v(y).

    Args:
        ux, vy: network outputs u(x) and v(y) for a batch.
        x, y: the corresponding input batches.
        c: pairwise cost function (default squared Euclidean).
        eps: regularization strength.
        reg_type: 'entropy' or 'l2'.

    Returns:
        Per-sample dual objective values (to be maximized).

    Raises:
        ValueError: if reg_type is not 'entropy' or 'l2'. The original
        silently returned None here, which surfaced later as a confusing
        TF error in reduce_mean.
    """
    if reg_type == 'entropy':
        return ux + vy + F_entropy(ux, vy, x, y, c, eps)
    elif reg_type == 'l2':
        return ux + vy + F_l2(ux, vy, x, y, c, eps)
    raise ValueError("Unknown reg_type: %r (expected 'entropy' or 'l2')" % (reg_type,))
28 |
29 |
def primer_dual(fx, ux, vy, x, y, c=cost_l2, eps=__eps__, reg_type='entropy'):
    """Objective for fitting the map f: cost of f(x) vs. y weighted by the plan density.

    Args:
        fx: map outputs f(x); f(x) lives in the target space Y.
        ux, vy: fixed dual potentials u(x) and v(y).
        x, y: the corresponding input batches.
        c: pairwise cost function (default squared Euclidean).
        eps: regularization strength.
        reg_type: 'entropy' or 'l2'.

    Returns:
        Per-sample loss values (to be minimized over f's parameters).

    Raises:
        ValueError: if reg_type is not 'entropy' or 'l2'. The original
        silently returned None on an unrecognized value.
    """
    if reg_type == 'entropy':
        return c(y, fx) * H_entropy(ux, vy, x, y, c, eps)
    elif reg_type == 'l2':
        return c(y, fx) * H_l2(ux, vy, x, y, c, eps)
    raise ValueError("Unknown reg_type: %r (expected 'entropy' or 'l2')" % (reg_type,))
36 |
--------------------------------------------------------------------------------
/models.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
slim = tf.contrib.slim  # shorthand for the TF-Slim layer library

# Negative-slope coefficient shared by all leaky-ReLU activations below.
__leaky_relu_alpha__ = 0.2
5 |
6 |
def __leaky_relu__(x, alpha=__leaky_relu_alpha__, name='Leaky_ReLU'):
    """Leaky ReLU: identity for positive x, slope `alpha` for negative x."""
    return tf.maximum(alpha * x, x, name=name)
9 |
10 |
class NN_DUAL(object):
    """Scalar-valued MLP used as a dual potential (u or v).

    Two 32-unit leaky-ReLU layers followed by a linear 1-unit output.
    Variables are collected from the named scope so trainers can
    optimize and checkpoint each potential independently.
    """

    def __init__(self, input_, vs_name, reuse=False):
        self.input = input_
        self.vs_name = vs_name
        self.reuse = reuse
        self.output = None
        self.var_list = None
        self.build_model()

    def build_model(self):
        """Assemble the graph and record this scope's variables."""
        with tf.variable_scope(self.vs_name, reuse=self.reuse) as vs:
            hidden = slim.fully_connected(self.input, num_outputs=32,
                                          activation_fn=__leaky_relu__)
            hidden = slim.fully_connected(hidden, num_outputs=32,
                                          activation_fn=__leaky_relu__)
            self.output = slim.fully_connected(hidden, num_outputs=1,
                                               activation_fn=None)
            self.var_list = tf.contrib.framework.get_variables(vs)
31 |
32 |
class NN_MAP(object):
    """MLP that maps 2-D source points to 2-D target points (the Monge map f).

    Two 32-unit leaky-ReLU layers followed by a linear 2-unit output.
    Variables are collected from the named scope for separate
    optimization and checkpointing.
    """

    def __init__(self, input_, vs_name, reuse=False):
        self.input = input_
        self.vs_name = vs_name
        self.reuse = reuse
        self.output = None
        self.var_list = None
        self.build_model()

    def build_model(self):
        """Assemble the graph and record this scope's variables."""
        with tf.variable_scope(self.vs_name, reuse=self.reuse) as vs:
            hidden = slim.fully_connected(self.input, num_outputs=32,
                                          activation_fn=__leaky_relu__)
            hidden = slim.fully_connected(hidden, num_outputs=32,
                                          activation_fn=__leaky_relu__)
            self.output = slim.fully_connected(hidden, num_outputs=2,
                                               activation_fn=None)
            self.var_list = tf.contrib.framework.get_variables(vs)
53 |
54 |
if __name__ == '__main__':
    # Nothing to run; this module only defines network builders.
    pass
57 |
--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | python3 compute_ot.py
4 | python3 estimate_map.py
5 | python3 visualize_map.py
--------------------------------------------------------------------------------
/visualize_map.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import tensorflow as tf
4 | import matplotlib.pyplot as plt
5 | import seaborn as sns
6 |
7 | import data_generator
8 | from models import NN_MAP
9 |
# Command-line flags for the visualization pass.
flags = tf.app.flags
flags.DEFINE_integer("n_batch_size", 512, "Batch size to train [512]")
FLAGS = flags.FLAGS
13 |
14 |
class Trainer(object):
    """Restores the trained map f and renders the diagnostic plots.

    Despite the name, train() performs no optimization: it draws one
    batch from each distribution, evaluates f(x), and calls visualize().
    """

    def __init__(self):
        # Attributes are declared up-front and populated by the define_* calls below.
        self.x_generator = None
        self.y_generator = None
        self.x = None
        self.f = None
        self.fx = None
        self.f_var_list = None
        self.loss = None
        self.ckpt_dir_ot = 'ckpts/stochastic_ot_computation/'
        self.ckpt_dir_map = 'ckpts/optimal_map_estimation/'
        self.visualize_dir_map = 'viz/'
        self.f_saver = None
        self.sess = None
        self.coord = None
        self.threads = None

        self.define_dataset()
        self.define_model()
        self.define_saver()
        self.define_viz_dir()
        self.initialize_session_and_etc()

    def define_dataset(self):
        """Create batch generators and the source-input placeholder."""
        self.x_generator = iter(data_generator.GeneratorGaussian1(FLAGS.n_batch_size))
        self.y_generator = iter(data_generator.GeneratorGaussians4(FLAGS.n_batch_size))
        self.x = tf.placeholder(tf.float32, (None, 2))

    def define_model(self):
        """Rebuild the map network; scope 'f' must match estimate_map.py."""
        self.f = NN_MAP(self.x, 'f')
        self.fx = self.f.output
        self.f_var_list = self.f.var_list

    def define_saver(self):
        self.f_saver = tf.train.Saver(self.f_var_list)

    def define_viz_dir(self):
        # Output directory for the generated PNGs.
        if not os.path.exists(self.visualize_dir_map):
            os.makedirs(self.visualize_dir_map)

    def initialize_session_and_etc(self):
        """Start a session, init variables, restore f, launch queue runners."""
        gpu_options = tf.GPUOptions(allow_growth=True)
        sess_config = tf.ConfigProto(allow_soft_placement=True,
                                     gpu_options=gpu_options)
        self.sess = tf.Session(config=sess_config)

        self.sess.run(tf.local_variables_initializer())
        self.sess.run(tf.global_variables_initializer())

        self.f_saver.restore(self.sess, self.ckpt_dir_map)

        self.coord = tf.train.Coordinator()
        self.threads = tf.train.start_queue_runners(sess=self.sess, coord=self.coord)

    def train(self):
        """Evaluate f on one batch and write the plots (no training happens)."""
        try:
            x = next(self.x_generator)
            y = next(self.y_generator)

            fx = self.sess.run(self.fx, feed_dict={self.x: x})

            visualize(x, y, fx)

        except KeyboardInterrupt:
            print("Interrupted!")
            self.coord.request_stop()

        finally:
            # Re-saves the (unchanged) map checkpoint before shutting down.
            self.f_saver.save(self.sess, self.ckpt_dir_map)
            print('Stop')
            self.coord.request_stop()
            self.coord.join(self.threads)
87 |
88 |
def visualize(x, y, fx):
    """Save three diagnostic plots under viz/: samples, map arrows, and a KDE.

    x: source batch (green), y: target batch (red), fx: transported
    points f(x) (blue). All are (N, 2) arrays.
    """
    # Source vs. target scatter.
    plt.scatter(x[:, 0], x[:, 1], s=1, c='g')
    plt.scatter(y[:, 0], y[:, 1], s=1, c='r')
    plt.xlim(-1.5, +1.5)
    plt.ylim(-1.5, +1.5)
    plt.savefig('viz/XnY.png')
    plt.clf()

    # Source vs. transported scatter, with arrows for one eighth of the points.
    plt.scatter(x[:, 0], x[:, 1], s=1, c='g')
    plt.scatter(fx[:, 0], fx[:, 1], s=1, c='b')

    axis = plt.axes()
    n_arrows = int(x.shape[0] / 8)
    for idx in range(n_arrows):
        delta_x = fx[idx, 0] - x[idx, 0]
        delta_y = fx[idx, 1] - x[idx, 1]
        axis.arrow(x[idx, 0], x[idx, 1], delta_x, delta_y,
                   head_width=0.03, head_length=0.02, fc='k', ec='k')

    plt.xlim(-1.5, +1.5)
    plt.ylim(-1.5, +1.5)

    plt.savefig('viz/XnFx.png')
    plt.clf()

    # Kernel-density estimate of the transported distribution.
    kde_grid = sns.jointplot(fx[:, 0], fx[:, 1], kind='kde')
    kde_grid.savefig('viz/Fx.png')
113 |
114 |
if __name__ == '__main__':
    # Restore the trained map and render the plots under viz/.
    trainer = Trainer()
    trainer.train()
    print("Done!")
119 |
--------------------------------------------------------------------------------
/viz/entropy/Fx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mikigom/large-scale-OT-mapping-TF/ac5fa4edb3fb98facb782214d3dc4a5188ebc352/viz/entropy/Fx.png
--------------------------------------------------------------------------------
/viz/entropy/XnFx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mikigom/large-scale-OT-mapping-TF/ac5fa4edb3fb98facb782214d3dc4a5188ebc352/viz/entropy/XnFx.png
--------------------------------------------------------------------------------
/viz/entropy/XnY.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mikigom/large-scale-OT-mapping-TF/ac5fa4edb3fb98facb782214d3dc4a5188ebc352/viz/entropy/XnY.png
--------------------------------------------------------------------------------
/viz/l2/Fx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mikigom/large-scale-OT-mapping-TF/ac5fa4edb3fb98facb782214d3dc4a5188ebc352/viz/l2/Fx.png
--------------------------------------------------------------------------------
/viz/l2/XnFx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mikigom/large-scale-OT-mapping-TF/ac5fa4edb3fb98facb782214d3dc4a5188ebc352/viz/l2/XnFx.png
--------------------------------------------------------------------------------
/viz/l2/XnY.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mikigom/large-scale-OT-mapping-TF/ac5fa4edb3fb98facb782214d3dc4a5188ebc352/viz/l2/XnY.png
--------------------------------------------------------------------------------