├── .gitignore
├── README.md
├── akaitsuki-slow
│   ├── config.py
│   ├── feed_dict.pbtxt
│   ├── feed_dict.py
│   └── main.py
├── autotune
│   ├── README.md
│   ├── autograd_lib.py
│   ├── autograd_lib_test.py
│   ├── autograd_test.py
│   ├── ciresan_bench.py
│   ├── curvature_test.py
│   ├── eval_conv2d_approx.py
│   ├── factored_test.py
│   ├── globals.py
│   ├── hessian_test.py
│   ├── linalg_bench.py
│   ├── linesearch_test_disabled.py
│   ├── lyapunov_test.py
│   ├── mnist_end2end_test.py
│   ├── plotting_test.py
│   ├── pytorch_benchmark.py
│   ├── scipy_benchmark.py
│   ├── svd_benchmark.py
│   ├── test
│   │   ├── bad_sigmas.pt
│   │   ├── factored.pt
│   │   └── gesvd_crash.txt
│   ├── train_ciresan.py
│   ├── train_ciresan_cca.py
│   ├── train_ciresan_factored.py
│   ├── train_ciresan_new.py
│   ├── train_medium.py
│   ├── train_small.py
│   ├── train_small_xent.py
│   ├── train_small_xent_factored.py
│   ├── train_tiny.py
│   ├── train_tiny_xent.py
│   ├── util.py
│   └── util_test.py
├── aws-recipes.ipynb
├── aws-scratch.ipynb
├── benchmark_huggingface_predict.py
├── bin
│   └── tfversion
├── clipping-profile.ipynb
├── cluster
│   ├── .gitignore
│   ├── README.md
│   ├── async_adder.py
│   ├── aws.py
│   ├── benchmark_grpc_recv.py
│   ├── benchmarks
│   │   ├── .DS_Store
│   │   ├── .gitignore
│   │   ├── LICENSE
│   │   ├── README.md
│   │   ├── bower_components
│   │   │   ├── d3
│   │   │   │   ├── .bower.json
│   │   │   │   ├── .gitattributes
│   │   │   │   ├── CONTRIBUTING.md
│   │   │   │   ├── LICENSE
│   │   │   │   ├── README.md
│   │   │   │   ├── bower.json
│   │   │   │   ├── d3.js
│   │   │   │   ├── d3.min.js
│   │   │   │   └── package.js
│   │   │   └── plottable
│   │   │       ├── .bower.json
│   │   │       ├── bower.json
│   │   │       ├── plottable.css
│   │   │       ├── plottable.d.ts
│   │   │       ├── plottable.js
│   │   │       └── plottable.min.js
│   │   ├── dashboard_app
│   │   │   ├── .DS_Store
│   │   │   ├── app.yaml
│   │   │   ├── main.py
│   │   │   ├── main_test.py
│   │   │   ├── requirements.txt
│   │   │   ├── static
│   │   │   │   ├── css
│   │   │   │   │   └── style.css
│   │   │   │   └── js
│   │   │   │       └── benchmark_latency_chart.js
│   │   │   └── templates
│   │   │       ├── index.html
│   │   │       └── test.html
│   │   ├── index.html
│   │   ├── js
│   │   │   ├── csv_benchmark_chart.js
│   │   │   └── latency_chart.js
│   │   ├── scripts
│   │   │   ├── Dockerfile.tf_cnn_benchmarks
│   │   │   ├── benchmark_configs.yml
│   │   │   ├── tf_cnn_benchmarks
│   │   │   │   ├── .DS_Store
│   │   │   │   ├── README.md
│   │   │   │   ├── benchmark_cnn.py
│   │   │   │   ├── benchmark_storage.py
│   │   │   │   ├── cbuild_benchmark_storage.py
│   │   │   │   ├── cnn_util.py
│   │   │   │   ├── convnet_builder.py
│   │   │   │   ├── datasets.py
│   │   │   │   ├── models
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── alexnet_model.py
│   │   │   │   │   ├── densenet_model.py
│   │   │   │   │   ├── googlenet_model.py
│   │   │   │   │   ├── inception_model.py
│   │   │   │   │   ├── lenet_model.py
│   │   │   │   │   ├── model.py
│   │   │   │   │   ├── model_config.py
│   │   │   │   │   ├── overfeat_model.py
│   │   │   │   │   ├── resnet_model.py
│   │   │   │   │   ├── trivial_model.py
│   │   │   │   │   └── vgg_model.py
│   │   │   │   ├── preprocessing.py
│   │   │   │   ├── tf_cnn_benchmarks.py
│   │   │   │   └── variable_mgr.py
│   │   │   └── util
│   │   │       ├── __init__.py
│   │   │       ├── benchmark_util.py
│   │   │       ├── benchmark_util_test.py
│   │   │       ├── convert_csv_to_json.py
│   │   │       └── convert_csv_to_json_test.py
│   │   ├── soumith_benchmarks.html
│   │   └── tools
│   │       ├── k8s_tensorflow_lib.py
│   │       ├── k8s_tensorflow_test.py
│   │       ├── kubectl_util.py
│   │       ├── kubectl_util_test.py
│   │       └── run_distributed_benchmarks.py
│   ├── client_transfer_benchmark.py
│   ├── cloud-formation-example
│   │   ├── README.md
│   │   ├── iam.yaml
│   │   ├── tensorflow.yaml
│   │   └── zone.sh
│   ├── connect
│   ├── connect.py
│   ├── delete_placement_groups.py
│   ├── fill_efs.py
│   ├── imagenet64
│   │   ├── README.md
│   │   ├── aws.py
│   │   ├── launch.py
│   │   ├── requirements.txt
│   │   └── variable_mgr.py
│   ├── instance_info.py
│   ├── launch_async_adder.py
│   ├── launch_micro.py
│   ├── launch_ray.py
│   ├── launch_simple_tf.py
│   ├── local_distributed_benchmark.py
│   ├── myutil.py
│   ├── ray_add.py
│   ├── simple_distributed.py
│   ├── terminate_instances.py
│   ├── test_aws.py
│   ├── tf-tools
│   │   ├── .gitignore
│   │   ├── benchmark
│   │   │   ├── multi_gpu
│   │   │   │   ├── advanced_tweaks_compare.sh
│   │   │   │   ├── image_classification_bench_tests.sh
│   │   │   │   ├── stats_monitor.sh
│   │   │   │   ├── test_runner.sh
│   │   │   │   └── unit_test_stats_monitor.sh
│   │   │   └── runner
│   │   │       ├── cluster_aws.py
│   │   │       ├── command_builder.py
│   │   │       ├── configs
│   │   │       │   └── aws
│   │   │       │       ├── multi_server.yaml
│   │   │       │       └── yaroslav.yaml
│   │   │       ├── instance_info.py
│   │   │       ├── launch_experiment.py
│   │   │       ├── test_cluster_aws.py
│   │   │       ├── test_command_builder.py
│   │   │       └── util.py
│   │   └── install
│   │       ├── aws_amzlinux.md
│   │       └── aws_ubuntu16_04.md
│   ├── tmux.py
│   └── upload_test.txt
├── conditional_backprop.py
├── configure_tf.sh
├── configure_tf_cpu.sh
├── danjar_peek.py
├── distributed
│   ├── README.md
│   ├── benchmark_grpc_recv.py
│   └── client_transfer_benchmark.py
├── double_memory_bug.py
├── dynamic_stitch_gpu.py
├── dynamic_stitch_gpu_profile.pbtxt
├── eager_lbfgs
│   ├── .ipynb_checkpoints
│   │   └── performance-checkpoint.ipynb
│   ├── common_gd.py
│   ├── data
│   │   ├── short_batch.csv
│   │   ├── short_eager_batch.csv
│   │   ├── short_eager_loss.csv
│   │   ├── short_eager_time.csv
│   │   ├── short_pytorch_loss.csv
│   │   └── short_pytorch_time.csv
│   ├── eager_lbfgs.py
│   ├── performance.ipynb
│   ├── pytorch_lbfgs.py
│   ├── run_experiment.py
│   ├── torch_lbfgs.lua
│   └── util.py
├── enqueue_many_test.py
├── enqueue_many_test_singlerun.py
├── ericyue-slowreader
│   ├── benchmark-batch-noqueuerunners-timeline.json
│   ├── benchmark-batch-noqueuerunners.profile
│   ├── benchmark-batch-noqueuerunners.py
│   ├── benchmark-batch.py
│   ├── benchmark-reader.py
│   ├── benchmark-synthetic-batch.py
│   ├── benchmark-synthetic.py
│   ├── benchmark.py
│   ├── data.zlib
│   └── profile-batch.py
├── example.png
├── free_gpus.py
├── github_pyfunc_slowness.py
├── gpu-memory-transfer.ipynb
├── gpu_oom.py
├── gpu_svd_bench.py
├── graph_template.py
├── graphvis.png
├── imagenet15-scratch.ipynb
├── input_benchmarks
│   ├── convert_to_records.py
│   ├── fully_connected_feed.py
│   ├── fully_connected_preloaded_var.py
│   ├── fully_connected_reader.py
│   ├── timeline.feed.json
│   ├── timeline.reader.json
│   └── timeline.var.json
├── inverse_segfault.py
├── jupyter-version.png
├── keras_autoencoder
│   ├── keras_large.py
│   ├── util.py
│   └── weightnorm.py
├── khatri_rao_benchmark.py
├── lazy_dog.py
├── linalg-benchmark
│   ├── README.md
│   ├── bad_matrix.py
│   ├── benchmark.py
│   ├── environment.yml
│   ├── get_cores_per_socket.py
│   ├── launch.py
│   ├── launch_tensorflow_svd_crash.py
│   ├── requirements.txt
│   ├── results.txt
│   └── tensorflow_svd_crash.py
├── line_search_example
│   ├── data
│   │   └── step_lengths_ada.csv
│   ├── line_search_example.py
│   └── util.py
├── linearize
│   ├── linearize.py
│   ├── linearize_test.py
│   └── memory_util.py
├── matmul_benchmark.py
├── matmul_benchmark_seq.py
├── matmul_times
│   ├── 1080-float16.csv
│   ├── 1080-float32.csv
│   ├── g3-float16.csv
│   ├── g3-float32.csv
│   ├── nvidia-p3-float16.csv
│   ├── nvidia-p3-float32.csv
│   ├── p2-float16.csv
│   └── p2-float32.csv
├── mavelin
│   ├── machine1.py
│   └── machine3.py
├── memory tracking.ipynb
├── memory-probe-examples.ipynb
├── memory-release-check.ipynb
├── natural_gradient_multilayer.py
├── node-merge.ipynb
├── notebook_util.py
├── numpy_initializers
│   ├── kfac_cifar.py
│   └── util.py
├── parallel_dequeue_test.py
├── phantomjs-tryout.ipynb
├── phantomjs-tryout.js
├── pytorch-hessian.ipynb
├── queue_mismatch.py
├── queues_talk
│   ├── queues.ipynb
│   └── slides.pdf
├── resnet_8_simple.pbtxt
├── resnet_leak_report.py
├── resnet_leak_report2.py
├── resource_variable_test.py
├── rotations_comparison.py
├── saving memory by using functions.ipynb
├── simple_rewiring.ipynb
├── simple_train.py
├── svd_benchmark.py
├── svd_noconverge.py
├── svd_test.py
├── tensorflow-memory-talk.pdf
├── tf_initializer_bug_report.py
├── tiny_runs
│   ├── qr_test.py
│   └── tiny_tf.py
└── whitening_util.py
/.gitignore:
--------------------------------------------------------------------------------
1 | /__pycache__
2 | /.ipynb_checkpoints
3 | *#
4 | *~
5 | /linalg-benchmark/.idea/linalg-benchmark.iml
6 | /linalg-benchmark/.idea/misc.xml
7 | /linalg-benchmark/.idea/modules.xml
8 | /linalg-benchmark/.idea/vcs.xml
9 | /linalg-benchmark/.idea/workspace.xml
10 | /linalg-benchmark/.idea
11 | .DS_Store
12 | __pycache__
13 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # stuff
2 |
--------------------------------------------------------------------------------
/akaitsuki-slow/config.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 |
4 | def str2bool(v):
5 | return v.lower() in ('y', 'yes', 't', 'true', '1')
6 |
7 |
8 | def get_args():
9 | parser = argparse.ArgumentParser()
10 | parser.register('type', 'bool', str2bool)
11 |
12 | parser.add_argument('--random_seed',
13 | type=int,
14 | default=1013,
15 | help='Random seed')
16 |
17 | parser.add_argument('--vocab_size',
18 | type=int,
19 | default=10000,
20 |                         help='Vocabulary size')
21 |
22 | parser.add_argument('--embed_size',
23 | type=int,
24 | default=128,
25 | help='Default embedding size if embedding_file is not given')
26 |
27 | parser.add_argument('--hidden_size',
28 | type=int,
29 | default=128,
30 | help='Hidden size of RNN units')
31 |
32 | parser.add_argument('--num_labels',
33 | type=int,
34 | default=96,
35 | help='num labels')
36 |
37 | parser.add_argument('--bidir',
38 | type='bool',
39 | default=True,
40 | help='bidir: whether to use a bidirectional RNN')
41 |
42 | parser.add_argument('--num_layers',
43 | type=int,
44 | default=1,
45 | help='Number of RNN layers')
46 |
47 | parser.add_argument('--rnn_type',
48 | type=str,
49 | default='gru',
50 | help='RNN type: lstm or gru (default)')
51 |
52 | parser.add_argument('--batch_size',
53 | type=int,
54 | default=32,
55 | help='Batch size')
56 |
57 | parser.add_argument('--dropout_rate',
58 | type=float,
59 | default=0.2,
60 | help='Dropout rate')
61 |
62 | parser.add_argument('--optimizer',
63 | type=str,
64 | default='sgd',
65 | help='Optimizer: sgd (default) or adam or rmsprop')
66 |
67 | parser.add_argument('--learning_rate', '-lr',
68 | type=float,
69 | default=0.1,
70 | help='Learning rate for SGD')
71 |
72 | return parser.parse_args()
73 |
74 |
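# Illustrative smoke test (an assumption, not part of the original file): parse
# the command line and echo a few of the resulting settings, e.g.
#   python config.py --rnn_type lstm --bidir false
if __name__ == '__main__':
    args = get_args()
    print(args.rnn_type, args.bidir, args.learning_rate)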
--------------------------------------------------------------------------------
/akaitsuki-slow/feed_dict.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from tensorflow.python.client import timeline
4 |
5 |
6 | sess = tf.Session()
7 | a = tf.placeholder(tf.float32)
8 | b = a*2
9 | c0 = sess.run([b], feed_dict={a:2.})
10 |
11 | run_metadata = tf.RunMetadata()
12 | run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
13 | run_options.output_partition_graphs=True
14 |
15 | c0 = sess.run([b], feed_dict={a:2.}, options=run_options,
16 | run_metadata=run_metadata)
17 | with open("feed_dict.pbtxt", "w") as f:
18 | f.write(str(run_metadata))
19 |
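# The `timeline` import above is otherwise unused; a hedged sketch of the step
# it presumably supports: converting the captured step stats into a Chrome
# trace viewable in chrome://tracing (the output filename is illustrative).
trace = timeline.Timeline(run_metadata.step_stats)
with open("feed_dict_timeline.json", "w") as f:
    f.write(trace.generate_chrome_trace_format())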
--------------------------------------------------------------------------------
/autotune/README.md:
--------------------------------------------------------------------------------
1 | To run tests in this directory
2 |
3 | ```
4 | pytest
5 | ```
6 |
7 | If there's a slow test, you can run its test file directly to see timings of the individual tests, e.g.
8 |
9 | ```
10 | python linesearch_test.py
11 | ```
12 |
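For reference, a minimal sketch of the kind of per-test timing harness such a file might use when run directly (the test name below is illustrative, not one of the actual tests):

```python
import time

def test_example():
    assert 1 + 1 == 2

if __name__ == '__main__':
    # Time each test_* function in the module and print the result.
    for name, fn in sorted(globals().copy().items()):
        if name.startswith('test_') and callable(fn):
            start = time.perf_counter()
            fn()
            print(f"{name}: {1000 * (time.perf_counter() - start):.1f} ms")
```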
--------------------------------------------------------------------------------
/autotune/globals.py:
--------------------------------------------------------------------------------
1 | # Module to hold global variables for curvature computation functions.
2 | # This is needed since functionality may be split across several modules.
3 |
4 | from typing import Optional
5 |
6 | import torch
7 | from torch.utils.tensorboard import SummaryWriter
8 |
9 | event_writer: Optional[SummaryWriter] = None
10 | project_name: Optional[str] = 'train_ciresan' # project name to use for wandb logging
11 | logdir_base: str = '/ncluster/runs'
12 | run_name: Optional[str] = None # run name to use, corresponds to logging dir and wandb run name
13 | logdir: Optional[str] = None # logdir
14 | token_count: int = 0 # TODO(y): rename to global-step. Meaning is context-specific, in case of sequences it's number of tokens
15 |
16 | args = None # global arg values
17 | debug_dump_stats: bool = False # print activations/backprops to console
18 | debug_linalg_crashes: bool = False # save matrices that cause linalg routines to crash
19 |
20 |
21 | # debug_hard_crashes_on_nans: bool = True # crash if encountering NaN
22 |
23 | hacks_disable_hess = False
24 |
25 |
26 | if torch.cuda.is_available():
27 | device = torch.device('cuda')
28 | print("Using GPU")
29 | else:
30 | device = torch.device('cpu')
31 |
32 |
33 | def reset_global_step():
34 | global token_count
35 | token_count = 0
36 |
37 |
38 | def increment_global_step(incr: int):
39 | global token_count
40 | token_count += incr
41 |
42 |
43 | def get_global_step() -> int:
44 | return token_count
45 |
46 |
47 |
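# Illustrative usage sketch (an assumption, not part of the original module):
# how another autotune module might drive the global step and event writer
# defined above. The logdir below is a placeholder.
if __name__ == '__main__':
    event_writer = SummaryWriter(log_dir='/tmp/globals_demo')
    reset_global_step()
    for _ in range(3):
        increment_global_step(1)
        event_writer.add_scalar('demo/step', get_global_step(), get_global_step())
    print('final global step:', get_global_step())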
--------------------------------------------------------------------------------
/autotune/linalg_bench.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import time
4 | from typing import Optional, Tuple, Callable
5 |
6 | # import torch
7 | import scipy
8 | import torch
9 | from torchcurv.optim import SecondOrderOptimizer
10 |
11 |
12 | import torch.nn as nn
13 |
14 | import util as u
15 |
16 | import numpy as np
17 |
18 | """
19 | MKL version unknown
20 | PyTorch version 1.2.0
21 | Scipy version: 1.2.1
22 | Numpy version: 1.16.4
23 | 1024-by-1024 matrix
24 | 7079.93 linalg.solve_lyapunov
25 | 280.11 linalg.pinvh
26 | 1186.08 linalg.pinv
27 | 49.18 linalg.inv
28 | 118.23 qr
29 | 413.42 svd
30 | """
31 |
32 | class Net(nn.Module):
33 | def __init__(self, d):
34 | super().__init__()
35 | self.w = nn.Linear(d, 1, bias=False)
36 |
37 | def forward(self, x: torch.Tensor):
38 | result = self.w(x)
39 | return result
40 |
41 |
42 | class timeit:
43 |     """Context manager to measure time spent in the block in milliseconds
44 |     and print it together with the given tag.
45 |     """
46 |
47 | def __init__(self, tag=""):
48 | self.tag = tag
49 |
50 | def __enter__(self):
51 | self.start = time.perf_counter()
52 | return self
53 |
54 | def __exit__(self, *args):
55 | self.end = time.perf_counter()
56 | interval_ms = 1000 * (self.end - self.start)
57 | print(f"{interval_ms:8.2f} {self.tag}")
58 |
59 |
60 | def get_mkl_version():
61 | import ctypes
62 | import numpy as np
63 |
64 | # this recipe only works on Linux
65 | try:
66 | ver = np.zeros(199, dtype=np.uint8)
67 | mkl = ctypes.cdll.LoadLibrary("libmkl_rt.so")
68 | mkl.MKL_Get_Version_String(ver.ctypes.data_as(ctypes.c_char_p), 198)
69 | return ver[ver != 0].tostring()
70 | except:
71 | return 'unknown'
72 |
73 |
74 | def print_cpu_info():
75 | ver = 'unknown'
76 | try:
77 | for l in open("/proc/cpuinfo").read().split('\n'):
78 | if 'model name' in l:
79 | ver = l
80 | break
81 | except:
82 | pass
83 |     print("CPU version:", ver)
84 |
85 | def linalg_bench():
86 | if np.__config__.get_info("lapack_mkl_info"):
87 | print("MKL version", get_mkl_version())
88 | else:
89 | print("not using MKL")
90 |
91 | print("PyTorch version", torch.version.__version__)
92 |
93 | print("Scipy version: ", scipy.version.full_version)
94 | print("Numpy version: ", np.version.full_version)
95 |
96 | for d in [1024]:
97 | print(f"{d}-by-{d} matrix")
98 | n = 10000
99 | assert n > 2*d # to prevent singularity
100 | X = np.random.random((d, 10000))
101 | Y = np.random.random((d, 10000))
102 | H = X @ X.T
103 | S = Y @ Y.T
104 |
105 | with timeit(f"linalg.solve_lyapunov"):
106 | result = scipy.linalg.solve_lyapunov(H, S)
107 | #print(result[0,0])
108 |
109 | with timeit(f"linalg.pinvh"):
110 | result = scipy.linalg.pinvh(H)
111 | #print(result[0, 0])
112 |
113 | with timeit(f"linalg.pinv"):
114 | result = scipy.linalg.pinv(H)
115 | #print(result[0, 0])
116 |
117 |
118 | with timeit(f"linalg.inv"):
119 | result = scipy.linalg.inv(H)
120 | #print(result[0, 0])
121 |
122 | with timeit(f"qr"):
123 | result = scipy.linalg.qr(H)
124 | #print(result[0, 0])
125 |
126 | with timeit(f"qr-pivoting"):
127 | result = scipy.linalg.qr(H, pivoting=True)
128 | #print(result[0, 0])
129 |
130 | with timeit(f"svd"):
131 | result = scipy.linalg.svd(H)
132 | #print(result[0, 0])
133 |
134 |
135 |
136 | if __name__ == '__main__':
137 | linalg_bench()
138 |
--------------------------------------------------------------------------------
/autotune/pytorch_benchmark.py:
--------------------------------------------------------------------------------
1 | """
2 | (pytorch_p36) [ec2-user@ip-172-31-6-232 cifar]$ python pytorch_benchmark.py
3 | MKL version b'Intel(R) Math Kernel Library Version 2019.0.4 Product Build 20190411 for Intel(R) 64 architecture applications'
4 | PyTorch version 1.1.0
5 | Scipy version: 1.3.0
6 | Numpy version: 1.16.4
7 | Benchmarking 1024-by-1024 matrix on cuda:0
8 | 882.84 svd
9 | 17.22 inv
10 | 227.04 pinv
11 | 452.77 eig
12 | 227.18 svd
13 |
14 |
15 | Laptop
16 |
17 | MKL version unknown
18 | PyTorch version 1.2.0
19 | Scipy version: 1.2.1
20 | Numpy version: 1.16.4
21 | CPU version: unknown
22 | CPU logical cores: 8
23 | CPU physical cores: 4
24 | CPU physical sockets: 0
25 | Benchmarking 1024-by-1024 matrix on cpu
26 | 170.24 svd
27 | 22.41 inv
28 | 206.70 pinv
29 | 247.92 eig
30 | 180.16 pinverse
31 | 20.08 solve
32 | 124.89 svd
33 | 14.57 inv
34 | 197.24 pinv
35 | 221.06 eig
36 | 213.46 pinverse
37 | 21.75 solve
38 |
39 | """
40 | import os
41 | import sys
42 | import time
43 |
44 | import numpy as np
45 |
46 | import util as u
47 |
48 | import torch
49 |
50 | # from @eamartin
51 | def empty_aligned(n, align):
52 |     """Get n bytes of memory with alignment align."""
53 | a = np.empty(n + (align - 1), dtype=np.float32)
54 | data_align = a.ctypes.data % align
55 | offset = 0 if data_align == 0 else (align - data_align)
56 | return a[offset: offset + n]
57 |
58 |
59 | def benchmark(method):
60 |
61 | start_time = time.time()
62 | times = []
63 |
64 | for i in range(1):
65 | if method == 'svd':
66 | _result = torch.svd(H)
67 | open('/dev/null', 'w').write(str(_result[0]))
68 | elif method == 'inv':
69 | _result = torch.inverse(H)
70 | open('/dev/null', 'w').write(str(_result[0]))
71 | elif method == 'pinv':
72 | _result = u.pinv(H)
73 | open('/dev/null', 'w').write(str(_result[0]))
74 | elif method == 'pinverse':
75 | _result = torch.pinverse(H)
76 | open('/dev/null', 'w').write(str(_result[0]))
77 | elif method == 'eig':
78 | _result = torch.symeig(H, eigenvectors=True)
79 | open('/dev/null', 'w').write(str(_result[0]))
80 | elif method == 'svd':
81 | _result = torch.svd(H)
82 | open('/dev/null', 'w').write(str(_result[0]))
83 | elif method == 'solve':
84 | _result = torch.solve(S, H)
85 | open('/dev/null', 'w').write(str(_result[0]))
86 | else:
87 | assert False
88 | new_time = time.time()
89 | elapsed_time = 1000 * (new_time - start_time)
90 | print(f"{elapsed_time:8.2f} {method}")
91 | start_time = new_time
92 | times.append(elapsed_time)
93 |
94 |
95 | if __name__ == '__main__':
96 | methods = ['svd', 'inv', 'pinv', 'eig', 'pinverse', 'solve']*2
97 |
98 | u.print_version_info()
99 | d = 1024
100 |
101 | x0 = torch.rand(d).reshape((d, 1)).float()
102 |
103 | X = torch.rand((d, 10000))
104 | Y = torch.rand((d, 10000))
105 | H = X @ X.t()
106 | S = Y @ Y.t()
107 |
108 | if torch.cuda.is_available():
109 | [x0, X, Y, H, S] = u.move_to_gpu([x0, X, Y, H, S])
110 |
111 | print(f"Benchmarking {d}-by-{d} matrix on {x0.device}")
112 | for method in methods:
113 | benchmark(method)
114 |
115 | # Other timings: svd
116 | # n=1000 Times: min: 126.04, median: 132.48
117 | # n=2000 Times: min: 573.03, median: 621.49
118 | # n=4096 Times: min: 5586.02, median: 6032.16
119 | # Other timings: inv
120 | # Times: min: 17.87, median: 23.41, mean: 27.90
121 |
--------------------------------------------------------------------------------
/autotune/svd_benchmark.py:
--------------------------------------------------------------------------------
1 | # Fastest way to compute eigenvectors for 4k matrix?
2 | #
3 | # Inverse on i3.metal
4 | # n=4096: 368 ms ± 1.51 ms per loop
5 | #
6 | # Xeon V3 benchmarks:
7 | # n=4096 eigs min: 27758.34, median: 28883.69
8 | # n=4096 gesdd min: 7241.70, median: 8477.95
9 | # n=4096 gesvd min=20487.48, median: 22057.64,
10 | # n=4096 inv min: 556.67, median: 579.25,
11 | # n=4096 linsolve: min: 534.40, median: 558.06, mean: 579.19
12 | #
13 | # Xeon V4:
14 | # n=4096 gesdd min: 5586.02, median: 6032.16
15 | #
16 | #
17 | # i7-5820K CPU @ 3.30GHz
18 | # n=4096 gesdd 7288.02, median: 7397.23, mean: 7478.78
19 | # n=4096 inv 520 msec
20 | #
21 | # after upgrading things
22 | # b'Intel(R) Math Kernel Library Version 2017.0.3 Product Build 20170413 for Intel(R) 64 architecture applications'
23 | # n=4096 inv 1427.54
24 |
25 |
26 | from scipy import linalg # for svd
27 | import numpy as np
28 | import time
29 | import sys
30 |
31 |
32 | # from @eamartin
33 | def empty_aligned(n, align):
34 |     """Get n bytes of memory with alignment align."""
35 | a = np.empty(n + (align - 1), dtype=np.float32)
36 | data_align = a.ctypes.data % align
37 | offset = 0 if data_align == 0 else (align - data_align)
38 | return a[offset : offset + n]
39 |
40 |
41 | def benchmark(method):
42 | n=1024
43 | x_old = np.random.randn(n*n).reshape((n,n)).astype(dtype=np.float32)
44 | x = empty_aligned(n*n, 32).reshape((n, n))
45 | x[:] = x_old
46 | x = x @ x.T
47 |
48 | x0 = np.random.randn(n).reshape((n,1)).astype(dtype=np.float32)
49 |
50 | start_time = time.time()
51 | times = []
52 |
53 | for i in range(1):
54 | if method == 'gesdd':
55 | result = linalg.svd(x)
56 | elif method == 'gesvd':
57 | result = linalg.svd(x, lapack_driver='gesvd')
58 | elif method == 'eigh':
59 | result = linalg.eigh(x)
60 | elif method == 'inv':
61 | result = linalg.inv(x)
62 | elif method == 'inv2':
63 | result = linalg.inv(x, overwrite_a=True)
64 | elif method == 'linsolve':
65 | result = linalg.solve(x, x0)
66 | else:
67 | assert False
68 | new_time = time.time()
69 | elapsed_time = 1000*(new_time - start_time)
70 |         print(f"{elapsed_time:8.2f} {method}")
71 | start_time = new_time
72 | times.append(elapsed_time)
73 |
74 |
75 | if __name__=='__main__':
76 | methods = ['gesdd', 'gesvd', 'eigh', 'inv', 'inv2', 'linsolve']
77 |
78 | for method in methods:
79 | benchmark(method)
80 |
81 |
82 |
83 |
84 | # Other timings: svd
85 | # n=1000 Times: min: 126.04, median: 132.48
86 | # n=2000 Times: min: 573.03, median: 621.49
87 | # n=4096 Times: min: 5586.02, median: 6032.16
88 | # Other timings: inv
89 | # Times: min: 17.87, median: 23.41, mean: 27.90
90 |
--------------------------------------------------------------------------------
/autotune/test/bad_sigmas.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yaroslavvb/stuff/a8024ead315aa1b5d6976940b3a062178f0e499d/autotune/test/bad_sigmas.pt
--------------------------------------------------------------------------------
/autotune/test/factored.pt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yaroslavvb/stuff/a8024ead315aa1b5d6976940b3a062178f0e499d/autotune/test/factored.pt
--------------------------------------------------------------------------------
/bin/tfversion:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
4 | import tensorflow as tf
5 | version=tf.__version__
6 | print("version: %s"%(version,))
7 | commit = tf.__git_version__
8 | print("__git_version__: %s"%(commit,))
9 | # commit looks like this
10 | # 'v1.0.0-65-g4763edf-dirty'
11 | commit = commit.replace("'","")
12 | if commit.endswith('-dirty'):
13 | dirty = True
14 | commit = commit[:-len('-dirty')]
15 | commit=commit.rsplit('-g', 1)[1]
16 | url = 'https://github.com/tensorflow/tensorflow/commit/'+commit
17 | print("Commit %s" %(url,))
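
# Illustrative helper (an assumption, not in the original script): the same
# parsing logic as above, factored into a function so it can be tested.
def commit_url_from_git_version(git_version):
    """Turn a string like "v1.0.0-65-g4763edf-dirty" into a GitHub commit URL."""
    git_version = git_version.replace("'", "")
    if git_version.endswith('-dirty'):
        git_version = git_version[:-len('-dirty')]
    sha = git_version.rsplit('-g', 1)[1]
    return 'https://github.com/tensorflow/tensorflow/commit/' + sha

assert commit_url_from_git_version("v1.0.0-65-g4763edf-dirty").endswith('/4763edf')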
--------------------------------------------------------------------------------
/cluster/.gitignore:
--------------------------------------------------------------------------------
1 | /.DS_Store
2 |
--------------------------------------------------------------------------------
/cluster/README.md:
--------------------------------------------------------------------------------
1 | # cluster
2 | train on AWS
3 |
--------------------------------------------------------------------------------
/cluster/benchmarks/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yaroslavvb/stuff/a8024ead315aa1b5d6976940b3a062178f0e499d/cluster/benchmarks/.DS_Store
--------------------------------------------------------------------------------
/cluster/benchmarks/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 |
--------------------------------------------------------------------------------
/cluster/benchmarks/README.md:
--------------------------------------------------------------------------------
1 | # Instructions for adding distributed benchmarks to continuous run:
2 |
3 | 1. You can add your benchmark file under
4 | [tensorflow/benchmarks/scripts](https://github.com/tensorflow/benchmarks/tree/master/scripts) directory. The benchmark should accept `task_index`, `job_name`, `ps_hosts` and `worker_hosts` flags. You can copy-paste the following flag definitions (a usage sketch follows this list):
5 |
6 | ```python
7 | tf.app.flags.DEFINE_integer("task_index", None, "Task index, should be >= 0.")
8 | tf.app.flags.DEFINE_string("job_name", None, "job name: worker or ps")
9 | tf.app.flags.DEFINE_string("ps_hosts", None, "Comma-separated list of hostname:port pairs")
10 | tf.app.flags.DEFINE_string("worker_hosts", None, "Comma-separated list of hostname:port pairs")
11 | ```
12 | 2. Report benchmark values by calling `store_data_in_json` from your benchmark
13 | code. This function is defined in
14 | [benchmark\_util.py](https://github.com/tensorflow/benchmarks/blob/master/scripts/util/benchmark_util.py).
15 | 3. Create a Dockerfile that sets up dependencies and runs your benchmark. For
16 | example, see [Dockerfile.tf\_cnn\_benchmarks](https://github.com/tensorflow/benchmarks/blob/master/scripts/Dockerfile.tf_cnn_benchmarks).
17 | 4. Add the benchmark to
18 | [benchmark\_configs.yml](https://github.com/tensorflow/benchmarks/blob/master/scripts/benchmark_configs.yml)
19 | * Set `benchmark_name` to a descriptive name for your benchmark and make sure
20 | it is unique.
21 | * Set `worker_count` and `ps_count`.
22 | * Set `docker_file` to the Dockerfile path starting with `benchmarks/`
23 | directory.
24 | * Optionally, you can pass flags to your benchmark by adding `args` list.
25 | 5. Send PR with the changes to annarev.
26 |
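As a concrete illustration of steps 1 and 2, here is a hedged sketch of the overall shape such a benchmark script might take (illustrative defaults and structure only, not the actual `tf_cnn_benchmarks` code):

```python
import tensorflow as tf

# Same flags as in step 1; the defaults here are illustrative.
tf.app.flags.DEFINE_integer("task_index", 0, "Task index, should be >= 0.")
tf.app.flags.DEFINE_string("job_name", "worker", "job name: worker or ps")
tf.app.flags.DEFINE_string("ps_hosts", "localhost:2222",
                           "Comma-separated list of hostname:port pairs")
tf.app.flags.DEFINE_string("worker_hosts", "localhost:2223",
                           "Comma-separated list of hostname:port pairs")
FLAGS = tf.app.flags.FLAGS


def main(_):
  cluster = tf.train.ClusterSpec({"ps": FLAGS.ps_hosts.split(","),
                                  "worker": FLAGS.worker_hosts.split(",")})
  server = tf.train.Server(cluster, job_name=FLAGS.job_name,
                           task_index=FLAGS.task_index)
  if FLAGS.job_name == "ps":
    server.join()
    return
  # ... build and run the model here, then report the measured values via
  # store_data_in_json (step 2).


if __name__ == "__main__":
  tf.app.run()
```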
27 | Currently running benchmarks:
28 | https://benchmarks-dot-tensorflow-testing.appspot.com/
29 |
30 | For any questions, please contact annarev@google.com.
31 |
--------------------------------------------------------------------------------
/cluster/benchmarks/bower_components/d3/.bower.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "d3",
3 | "version": "3.5.5",
4 | "main": "d3.js",
5 | "scripts": [
6 | "d3.js"
7 | ],
8 | "ignore": [
9 | ".DS_Store",
10 | ".git",
11 | ".gitignore",
12 | ".npmignore",
13 | ".spmignore",
14 | ".travis.yml",
15 | "Makefile",
16 | "bin",
17 | "component.json",
18 | "composer.json",
19 | "index.js",
20 | "lib",
21 | "node_modules",
22 | "package.json",
23 | "src",
24 | "test"
25 | ],
26 | "homepage": "https://github.com/mbostock-bower/d3-bower",
27 | "_release": "3.5.5",
28 | "_resolution": {
29 | "type": "version",
30 | "tag": "v3.5.5",
31 | "commit": "264ea13e4ed8583b37a91f7640aa22fdee6b2f26"
32 | },
33 | "_source": "https://github.com/mbostock-bower/d3-bower.git",
34 | "_target": "3.5.5",
35 | "_originalSource": "d3"
36 | }
--------------------------------------------------------------------------------
/cluster/benchmarks/bower_components/d3/.gitattributes:
--------------------------------------------------------------------------------
1 | bower.json -diff merge=ours
2 | component.json -diff merge=ours
3 | d3.js -diff merge=ours
4 | d3.min.js -diff merge=ours
5 | package.js -diff merge=ours
6 |
--------------------------------------------------------------------------------
/cluster/benchmarks/bower_components/d3/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | **Important:** these GitHub issues are for *bug reports and feature requests only*. Please use [StackOverflow](http://stackoverflow.com/questions/tagged/d3.js) or the [d3-js Google group](https://groups.google.com/d/forum/d3-js) for general help.
4 |
5 | If you’re looking for ways to contribute, please [peruse open issues](https://github.com/mbostock/d3/issues?milestone=&page=1&state=open). The icebox is a good place to find ideas that are not currently in development. If you already have an idea, please check past issues to see whether your idea or a similar one was previously discussed.
6 |
7 | Before submitting a pull request, consider implementing a live example first, say using [bl.ocks.org](http://bl.ocks.org). Real-world use cases go a long way to demonstrating the usefulness of a proposed feature. The more complex a feature’s implementation, the more usefulness it should provide. Share your demo using the #d3js tag on Twitter or by sending it to the [d3-js Google group](https://groups.google.com/d/forum/d3-js).
8 |
9 | If your proposed feature does not involve changing core functionality, consider submitting it instead as a [D3 plugin](https://github.com/d3/d3-plugins). New core features should be for general use, whereas plugins are suitable for more specialized use cases. When in doubt, it’s easier to start with a plugin before “graduating” to core.
10 |
11 | To contribute new documentation or add examples to the gallery, just [edit the Wiki](https://github.com/mbostock/d3/wiki)!
12 |
13 | ## How to Submit a Pull Request
14 |
15 | 1. Click the “Fork” button to create your personal fork of the D3 repository.
16 |
17 | 2. After cloning your fork of the D3 repository in the terminal, run `npm install` to install D3’s dependencies.
18 |
19 | 3. Create a new branch for your new feature. For example: `git checkout -b my-awesome-feature`. A dedicated branch for your pull request means you can develop multiple features at the same time, and ensures that your pull request is stable even if you later decide to develop an unrelated feature.
20 |
21 | 4. The `d3.js` and `d3.min.js` files are built from source files in the `src` directory. _Do not edit `d3.js` directly._ Instead, edit the source files, and then run `make` to build the generated files.
22 |
23 | 5. Use `make test` to run tests and verify your changes. If you are adding a new feature, you should add new tests! If you are changing existing functionality, make sure the existing tests run, or update them as appropriate.
24 |
25 | 6. Sign D3’s [Individual Contributor License Agreement](https://docs.google.com/forms/d/1CzjdBKtDuA8WeuFJinadx956xLQ4Xriv7-oDvXnZMaI/viewform). Unless you are submitting a trivial patch (such as fixing a typo), this form is needed to verify that you are able to contribute.
26 |
27 | 7. Submit your pull request, and good luck!
28 |
--------------------------------------------------------------------------------
/cluster/benchmarks/bower_components/d3/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2010-2015, Michael Bostock
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice, this
8 | list of conditions and the following disclaimer.
9 |
10 | * Redistributions in binary form must reproduce the above copyright notice,
11 | this list of conditions and the following disclaimer in the documentation
12 | and/or other materials provided with the distribution.
13 |
14 | * The name Michael Bostock may not be used to endorse or promote products
15 | derived from this software without specific prior written permission.
16 |
17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 | DISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT,
21 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
24 | OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
25 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
26 | EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 |
--------------------------------------------------------------------------------
/cluster/benchmarks/bower_components/d3/README.md:
--------------------------------------------------------------------------------
1 | # Data-Driven Documents
2 |
3 |
4 |
5 | **D3.js** is a JavaScript library for manipulating documents based on data. **D3** helps you bring data to life using HTML, SVG and CSS. D3’s emphasis on web standards gives you the full capabilities of modern browsers without tying yourself to a proprietary framework, combining powerful visualization components and a data-driven approach to DOM manipulation.
6 |
7 | Want to learn more? [See the wiki.](https://github.com/mbostock/d3/wiki)
8 |
9 | For examples, [see the gallery](https://github.com/mbostock/d3/wiki/Gallery) and [mbostock’s bl.ocks](http://bl.ocks.org/mbostock).
10 |
--------------------------------------------------------------------------------
/cluster/benchmarks/bower_components/d3/bower.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "d3",
3 | "version": "3.5.5",
4 | "main": "d3.js",
5 | "scripts": [
6 | "d3.js"
7 | ],
8 | "ignore": [
9 | ".DS_Store",
10 | ".git",
11 | ".gitignore",
12 | ".npmignore",
13 | ".spmignore",
14 | ".travis.yml",
15 | "Makefile",
16 | "bin",
17 | "component.json",
18 | "composer.json",
19 | "index.js",
20 | "lib",
21 | "node_modules",
22 | "package.json",
23 | "src",
24 | "test"
25 | ]
26 | }
27 |
--------------------------------------------------------------------------------
/cluster/benchmarks/bower_components/d3/package.js:
--------------------------------------------------------------------------------
1 | // Package metadata for Meteor.js.
2 |
3 | Package.describe({
4 | name: "d3js:d3", // http://atmospherejs.com/d3js/d3
5 | summary: "D3 (official): A JavaScript visualization library for HTML and SVG.",
6 | version: "3.5.5",
7 | git: "https://github.com/mbostock/d3.git"
8 | });
9 |
10 | Package.onUse(function(api) {
11 | api.versionsFrom(["METEOR@1.0"]);
12 | api.addFiles("d3.js", "client");
13 | });
14 |
--------------------------------------------------------------------------------
/cluster/benchmarks/bower_components/plottable/.bower.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "plottable",
3 | "description": "A modular charting library built on D3",
4 | "version": "2.2.0",
5 | "main": [
6 | "plottable.js",
7 | "plottable.css"
8 | ],
9 | "typescript": {
10 | "definition": "plottable.d.ts"
11 | },
12 | "license": "MIT",
13 | "ignore": [
14 | "**/*",
15 | "!bower.json",
16 | "!plottable.js",
17 | "!plottable.css",
18 | "!plottable.min.js",
19 | "!plottable.d.ts"
20 | ],
21 | "keywords": [
22 | "plottable",
23 | "plottablejs",
24 | "plottable.js",
25 | "d3",
26 | "data viz",
27 | "chart",
28 | "charts",
29 | "reusable charts",
30 | "visualization",
31 | "scatterplot",
32 | "bar chart",
33 | "plot",
34 | "plots"
35 | ],
36 | "dependencies": {
37 | "d3": "3.5.5"
38 | },
39 | "homepage": "http://plottablejs.org",
40 | "repository": {
41 | "type": "git",
42 | "url": "git://github.com/palantir/plottable.git"
43 | },
44 | "devDependencies": {
45 | "chai": "2.0.0",
46 | "mocha": "2.2.5",
47 | "jQuery": "2.1.0",
48 | "jquery.simulate": "1.2.0",
49 | "requirejs": "2.1.18",
50 | "sinon": "1.16.1"
51 | },
52 | "_release": "2.2.0",
53 | "_resolution": {
54 | "type": "version",
55 | "tag": "v2.2.0",
56 | "commit": "e36001d8b6640cd23599905255d61b4ab58a648d"
57 | },
58 | "_source": "https://github.com/palantir/plottable.git",
59 | "_target": "^2.2.0",
60 | "_originalSource": "plottable",
61 | "_direct": true
62 | }
--------------------------------------------------------------------------------
/cluster/benchmarks/bower_components/plottable/bower.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "plottable",
3 | "description": "A modular charting library built on D3",
4 | "version": "2.2.0",
5 | "main": [
6 | "plottable.js",
7 | "plottable.css"
8 | ],
9 | "typescript": {
10 | "definition": "plottable.d.ts"
11 | },
12 | "license": "MIT",
13 | "ignore": [
14 | "**/*",
15 | "!bower.json",
16 | "!plottable.js",
17 | "!plottable.css",
18 | "!plottable.min.js",
19 | "!plottable.d.ts"
20 | ],
21 | "keywords": [
22 | "plottable",
23 | "plottablejs",
24 | "plottable.js",
25 | "d3",
26 | "data viz",
27 | "chart",
28 | "charts",
29 | "reusable charts",
30 | "visualization",
31 | "scatterplot",
32 | "bar chart",
33 | "plot",
34 | "plots"
35 | ],
36 | "dependencies": {
37 | "d3": "3.5.5"
38 | },
39 | "homepage": "http://plottablejs.org",
40 | "repository": {
41 | "type": "git",
42 | "url": "git://github.com/palantir/plottable.git"
43 | },
44 | "devDependencies": {
45 | "chai": "2.0.0",
46 | "mocha": "2.2.5",
47 | "jQuery": "2.1.0",
48 | "jquery.simulate": "1.2.0",
49 | "requirejs": "2.1.18",
50 | "sinon": "1.16.1"
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/cluster/benchmarks/dashboard_app/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/yaroslavvb/stuff/a8024ead315aa1b5d6976940b3a062178f0e499d/cluster/benchmarks/dashboard_app/.DS_Store
--------------------------------------------------------------------------------
/cluster/benchmarks/dashboard_app/app.yaml:
--------------------------------------------------------------------------------
1 | runtime: python
2 | env: flex
3 | entrypoint: gunicorn -b :$PORT main:app
4 | service: benchmarks
5 |
6 | runtime_config:
7 | python_version: 3
8 |
--------------------------------------------------------------------------------
/cluster/benchmarks/dashboard_app/main_test.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | import json
17 | import main
18 | import unittest
19 | import urllib
20 |
21 | class TestMain(unittest.TestCase):
22 |
23 | def testArgumentInvalidFormat(self):
24 | self.assertEqual('', main.argument_name(''))
25 | self.assertEqual('', main.argument_name('arg=val'))
26 | self.assertEqual('', main.argument_name('-arg=val'))
27 | self.assertEqual('', main.argument_name('--argval'))
28 | self.assertEqual('', main.argument_name('--=val'))
29 | self.assertEqual('', main.argument_name('--='))
30 |
31 | def testArgumentValidFormat(self):
32 | self.assertEqual('abc', main.argument_name('--abc=123'))
33 | self.assertEqual('a', main.argument_name('--a=123'))
34 |
35 | def testIndexPage(self):
36 | main.app.testing = True
37 | client = main.app.test_client()
38 |
39 | r = client.get('/')
40 | self.assertEqual(200, r.status_code)
41 | self.assertIn('sample_logged_benchmark', r.data.decode('utf-8'))
42 |
43 | def testTestPage_InvalidTest(self):
44 | main.app.testing = True
45 | client = main.app.test_client()
46 |
47 | r = client.get('/test/abc')
48 | self.assertEqual(200, r.status_code)
49 | self.assertIn('No data for benchmark', str(r.data))
50 |
51 | def testTestPage_SampleTest(self):
52 | main.app.testing = True
53 | client = main.app.test_client()
54 | sample_benchmark_name = '//tensorflow/examples/benchmark:sample_logged_benchmark'
55 |
56 | r = client.get(
57 | '/test/%252F%252Ftensorflow%252Fexamples%252Fbenchmark%253Asample_logged_benchmark')
58 | self.assertEqual(200, r.status_code)
59 | self.assertIn(
60 | 'Performance plots for %s' % sample_benchmark_name, str(r.data))
61 |
62 | def testFetchBenchmarkData_InvalidTest(self):
63 | main.app.testing = True
64 | client = main.app.test_client()
65 |
66 | r = client.get('/benchmark_data/?test=abc&entry=cde')
67 | self.assertEqual(200, r.status_code)
68 | self.assertEqual(b'[]', r.data)
69 |
70 | def testFetchBenchmarkData_SampleTest(self):
71 | main.app.testing = True
72 | client = main.app.test_client()
73 |
74 | encoded_benchmark_name = (
75 | '/test/%252F%252Ftensorflow%252Fexamples%252Fbenchmark%253Asample_logged_benchmark')
76 | r = client.get('/benchmark_data/?test=%s&entry=SampleBenchmark.sum_wall_time' %
77 | encoded_benchmark_name)
78 | self.assertEqual(200, r.status_code)
79 | self.assertEqual(b'[]', r.data)
80 |
81 |
82 | if __name__ == '__main__':
83 | unittest.main()
84 |
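
# Hedged sketch (an assumption, not the real dashboard code): a minimal
# implementation of main.argument_name that is consistent with the tests above.
def _argument_name_sketch(arg):
    """Return the flag name from a '--name=value' string, or '' if malformed."""
    if not arg.startswith('--') or '=' not in arg:
        return ''
    return arg[2:].split('=', 1)[0]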
--------------------------------------------------------------------------------
/cluster/benchmarks/dashboard_app/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask==0.12.2
2 | gunicorn==19.7.1
3 | google-cloud
4 |
--------------------------------------------------------------------------------
/cluster/benchmarks/dashboard_app/static/css/style.css:
--------------------------------------------------------------------------------
1 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | ==============================================================================
15 | */
16 |
17 | body {
18 | font-family: roboto, sans-serif;
19 | }
20 |
21 | h2 {
22 | font-weight: 400;
23 | }
24 |
25 | em {
26 | color: #666666;
27 | font-size: 18px;
28 | font-style: normal;
29 | }
30 |
31 | .outer_div {
32 | max-width: 1000px;
33 | margin: 20px;
34 | }
35 |
36 | table, th, td {
37 | border-collapse: collapse;
38 | border: 1px solid #d9d9d9;
39 | }
40 |
41 | th, td {
42 | padding: 15px;
43 | }
44 |
45 | th {
46 | text-align: left;
47 | font-weight: normal;
48 | }
49 |
50 | ul {
51 | width: 100%;
52 | margin: 0;
53 | padding: 0;
54 | }
55 |
56 | li {
57 | font-size: 14px;
58 | background-color: white;
59 | list-style: none;
60 | border: 1px solid #d9d9d9;
61 | border-radius: 2px;
62 | margin: 10px 0 0 0;
63 | }
64 |
65 | li:hover {
66 | background-color: #eeeeee;
67 | }
68 |
69 | li a {
70 | display: inline-block;
71 | width: 100%;
72 | height: 100%;
73 | color: black;
74 | text-decoration: none;
75 | padding: 8px 8px;
76 | }
77 |
78 | svg {
79 | margin-top: 20px;
80 | }
81 |
82 | #filter_input {
83 | display: block;
84 | width: 100%;
85 | font-size: 14px;
86 | padding: 8px 8px;
87 | border: 1px solid #d9d9d9;
88 | border-radius: 2px;
89 | box-sizing: border-box;
90 | }
91 |
92 | #filter_label, #arguments_label {
93 | color: #666666;
94 | font-size: 16px;
95 | }
96 |
97 | #latest_value_label {
98 | margin-bottom: 20px;
99 | }
100 |
101 | plottable .title-label text{
102 | font-size: 16px;
103 | font-family: roboto, sans-serif;
104 | }
105 |
--------------------------------------------------------------------------------
/cluster/benchmarks/dashboard_app/static/js/benchmark_latency_chart.js:
--------------------------------------------------------------------------------
1 | // Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 |
15 | /**
16 | * @fileoverview Provides a way to create a benchmark latency chart.
17 | */
18 |
19 | /**
20 | * Constructor.
21 |  * @param {string} svg_element svg element to add the chart to.
22 | * @param {string} test_id of the test to plot data for.
23 | * @param {string} entry_id of the specific test entry to plot.
24 | */
25 | var BenchmarkLatencyChart = function(svg_element, test_id, entry_id) {
26 | this.svg_element = svg_element;
27 | this.test_id = test_id;
28 | this.entry_id = entry_id;
29 | };
30 |
31 | /**
32 | * Adds data to the given plots.
33 | */
34 | BenchmarkLatencyChart.prototype.addData_ = function(plot) {
35 | const encodedTestId = encodeURIComponent(this.test_id);
36 | const encodedEntryId = encodeURIComponent(this.entry_id);
37 | const jsonDataUrl =
38 | '/benchmark_data/?test=' + encodedTestId + '&entry=' + encodedEntryId
39 | d3.json(jsonDataUrl, function(data) {
40 | benchmarks = []
41 | for (var i = 0; i < data.length; i++) {
42 | const name = this.entry_id;
43 | const timestamp = new Date(+data[i]['start'] / 1000);
44 | const mean_latency = data[i]['timing'];
45 | benchmarks.push(
46 | {name: name, timestamp: timestamp,
47 | mean_latency: +mean_latency});
48 | }
49 | plot.addDataset(
50 | new Plottable.Dataset(benchmarks, {name: 'Forward'}));
51 | });
52 | };
53 |
54 | /**
55 | * Create the chart.
56 | */
57 | BenchmarkLatencyChart.prototype.makeChart = function() {
58 | const xScale = new Plottable.Scales.Time();
59 | const yScaleForward = new Plottable.Scales.Linear();
60 |
61 | const plot = new LatencyChart(
62 | this.entry_id, 'value',
63 | xScale, yScaleForward);
64 |
65 | this.addData_(plot);
66 |
67 | const table = new Plottable.Components.Table([[plot.table]]);
68 | table.renderTo(this.svg_element);
69 |
70 | plot.addTooltip();
71 | new Plottable.Interactions.Click()
72 | .attachTo(plot.linePlot)
73 | .onClick(function(p) {
74 | plot.updateForPosition(p);
75 | });
76 | };
77 |
--------------------------------------------------------------------------------
/cluster/benchmarks/dashboard_app/templates/index.html:
--------------------------------------------------------------------------------
1 |
17 |
18 |