├── software └── enas │ ├── src │ ├── __init__.py │ ├── ptb │ │ ├── __init__.py │ │ ├── data_utils.py │ │ └── ptb_ops.py │ ├── cifar10 │ │ ├── __init__.py │ │ ├── sample_structures6.txt │ │ ├── sample_structures12.txt │ │ └── cifar10_6_eval.sh │ ├── controller.py │ └── common_ops.py │ ├── img │ └── enas_rnn_cell.png │ └── scripts │ ├── cifar10_micro_search.sh │ ├── ptb_search.sh │ ├── cifar10_macro_search.sh │ ├── cifar10_micro_final.sh │ ├── custom_cifar10_macro_final_6.sh │ ├── ptb_final.sh │ ├── custom_cifar10_macro_final_12.sh │ └── cifar10_6_eval.sh ├── bayesian_optimization ├── Theano-master │ ├── LICENSE.txt │ ├── theano │ │ ├── misc │ │ │ ├── __init__.py │ │ │ ├── tests │ │ │ │ ├── __init__.py │ │ │ │ └── test_cudamat_utils.py │ │ │ ├── check_blas_many.sh │ │ │ ├── latence_gpu_transfert.py │ │ │ ├── buildbot_filter.py │ │ │ └── doubleop.py │ │ ├── sandbox │ │ │ ├── __init__.py │ │ │ ├── tests │ │ │ │ └── __init__.py │ │ │ ├── cuda │ │ │ │ └── tests │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── CudaNdarray.pkl │ │ │ │ │ ├── test_viewop.py │ │ │ │ │ ├── test_neighbours.py │ │ │ │ │ └── test_gradient.py │ │ │ ├── gpuarray │ │ │ │ ├── tests │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_neighbours.py │ │ │ │ │ ├── GpuArray.pkl │ │ │ │ │ ├── config.py │ │ │ │ │ └── test_nerv.py │ │ │ │ ├── fp16_help.py │ │ │ │ ├── cudnn_helper.h │ │ │ │ ├── dnn_conv_base.c │ │ │ │ └── conv_desc.c │ │ │ ├── linalg │ │ │ │ ├── tests │ │ │ │ │ └── __init__.py │ │ │ │ └── __init__.py │ │ │ ├── conv.py │ │ │ ├── neighbours.py │ │ │ ├── blocksparse.py │ │ │ └── softsign.py │ │ ├── compile │ │ │ ├── tests │ │ │ │ ├── __init__.py │ │ │ │ ├── test_function_name.py │ │ │ │ ├── test_mode.py │ │ │ │ └── test_nanguardmode.py │ │ │ ├── sandbox │ │ │ │ └── __init__.py │ │ │ └── __init__.py │ │ ├── d3viz │ │ │ ├── tests │ │ │ │ ├── __init__.py │ │ │ │ ├── test_d3viz.py │ │ │ │ └── models.py │ │ │ ├── __init__.py │ │ │ ├── css │ │ │ │ ├── d3-context-menu.css │ │ │ │ └── d3viz.css │ │ │ └── js │ │ │ │ 
└── d3-context-menu.js │ │ ├── gof │ │ │ ├── tests │ │ │ │ ├── __init__.py │ │ │ │ ├── test_fg_old_crash.pkl │ │ │ │ ├── test_compiledir.py │ │ │ │ ├── test_graph_opt_caching.py │ │ │ │ └── test_optdb.py │ │ │ ├── theano_mod_helper.h │ │ │ ├── null_type.py │ │ │ └── callcache.py │ │ ├── scalar │ │ │ ├── tests │ │ │ │ ├── __init__.py │ │ │ │ ├── test_div_no_future.py │ │ │ │ ├── test_div_future.py │ │ │ │ └── test_basic_sympy.py │ │ │ └── __init__.py │ │ ├── sparse │ │ │ ├── tests │ │ │ │ ├── __init__.py │ │ │ │ └── test_type.py │ │ │ ├── sandbox │ │ │ │ ├── __init__.py │ │ │ │ └── truedot.py │ │ │ ├── __init__.py │ │ │ ├── utils.py │ │ │ └── sharedvar.py │ │ ├── tensor │ │ │ ├── signal │ │ │ │ ├── __init__.py │ │ │ │ ├── tests │ │ │ │ │ └── __init__.py │ │ │ │ └── downsample.py │ │ │ ├── tests │ │ │ │ ├── __init__.py │ │ │ │ ├── shape_opt_cycle.pkl │ │ │ │ ├── test_var.py │ │ │ │ ├── test_xlogx.py │ │ │ │ ├── _test_mpi_roundtrip.py │ │ │ │ └── test_type_other.py │ │ │ └── nnet │ │ │ │ └── tests │ │ │ │ ├── __init__.py │ │ │ │ └── test_opt.py │ │ ├── scan_module │ │ │ ├── tests │ │ │ │ └── __init__.py │ │ │ └── numpy_api_changes.diff │ │ ├── typed_list │ │ │ ├── tests │ │ │ │ └── __init__.py │ │ │ ├── __init__.py │ │ │ └── opt.py │ │ ├── tests │ │ │ ├── __init__.py │ │ │ ├── disturb_mem.py │ │ │ └── test_config.py │ │ ├── version.py │ │ └── raise_op.py │ ├── requirement-rtd.txt │ ├── doc │ │ ├── NEWS.txt │ │ ├── bcast.png │ │ ├── tutorial │ │ │ ├── symbolic_graphs.txt │ │ │ ├── extending_theano.txt │ │ │ ├── apply.png │ │ │ ├── extending_theano_c.txt │ │ │ ├── dlogistic.png │ │ │ ├── logistic.png │ │ │ ├── pics │ │ │ │ ├── d3viz.png │ │ │ │ ├── logreg_pydotprint_predict.png │ │ │ │ ├── logreg_pydotprint_train.png │ │ │ │ └── logreg_pydotprint_prediction.png │ │ │ ├── profiling_example.py │ │ │ ├── adding_solution_1.py │ │ │ ├── logistic.gp │ │ │ ├── python.txt │ │ │ └── index.txt │ │ ├── images │ │ │ ├── lstm.png │ │ │ ├── talk2010.gif │ │ │ ├── talk2010.png │ │ │ 
├── Elman_srnn.png │ │ │ ├── blocksparse.png │ │ │ ├── snake_theta2.png │ │ │ ├── theano_logo.png │ │ │ ├── lstm_memorycell.png │ │ │ ├── theano_logo-200x67.png │ │ │ ├── theano-theta-117x117.png │ │ │ ├── theano_logo_allblue_200x46.png │ │ │ ├── theano_logo_allblue_200x54.png │ │ │ ├── theano_logo_allblue_350x95.png │ │ │ ├── theano_logo_allblue_63x21.png │ │ │ └── theano_bw_parens_transparent.png │ │ ├── extending │ │ │ ├── apply.png │ │ │ ├── pics │ │ │ │ ├── symbolic_graph_opt.png │ │ │ │ └── symbolic_graph_unopt.png │ │ │ ├── extending_faq.txt │ │ │ ├── theano_vs_c.txt │ │ │ └── tips.txt │ │ ├── library │ │ │ ├── tensor │ │ │ │ ├── bcast.png │ │ │ │ ├── nnet │ │ │ │ │ ├── sigmoid_prec.png │ │ │ │ │ ├── bn.txt │ │ │ │ │ ├── blocksparse.txt │ │ │ │ │ ├── index.txt │ │ │ │ │ └── neighbours.txt │ │ │ │ ├── opt.txt │ │ │ │ ├── utils.txt │ │ │ │ ├── extra_ops.txt │ │ │ │ ├── nlinalg.txt │ │ │ │ ├── slinalg.txt │ │ │ │ ├── signal │ │ │ │ │ ├── index.txt │ │ │ │ │ ├── downsample.txt │ │ │ │ │ └── conv.txt │ │ │ │ ├── index.txt │ │ │ │ └── io.txt │ │ │ ├── d3viz │ │ │ │ ├── examples │ │ │ │ │ ├── mlp.png │ │ │ │ │ ├── mlp2.pdf │ │ │ │ │ ├── mlp2.png │ │ │ │ │ └── d3viz │ │ │ │ │ │ ├── css │ │ │ │ │ │ ├── d3-context-menu.css │ │ │ │ │ │ └── d3viz.css │ │ │ │ │ │ └── js │ │ │ │ │ │ └── d3-context-menu.js │ │ │ │ └── index_files │ │ │ │ │ ├── index_10_0.png │ │ │ │ │ ├── index_11_0.png │ │ │ │ │ ├── index_24_0.png │ │ │ │ │ └── index_25_0.png │ │ │ ├── compile │ │ │ │ ├── ops.txt │ │ │ │ ├── index.txt │ │ │ │ └── opfromgraph.txt │ │ │ ├── scalar │ │ │ │ └── index.txt │ │ │ ├── sandbox │ │ │ │ ├── gpuarray │ │ │ │ │ ├── type.txt │ │ │ │ │ ├── extra.txt │ │ │ │ │ ├── index.txt │ │ │ │ │ └── op.txt │ │ │ │ ├── neighbours.txt │ │ │ │ ├── cuda │ │ │ │ │ ├── index.txt │ │ │ │ │ ├── type.txt │ │ │ │ │ ├── var.txt │ │ │ │ │ └── op.txt │ │ │ │ ├── rng_mrg.txt │ │ │ │ ├── index.txt │ │ │ │ └── linalg.txt │ │ │ ├── gof │ │ │ │ ├── index.txt │ │ │ │ ├── graph.txt │ │ │ │ ├── type.txt 
│ │ │ │ ├── utils.txt │ │ │ │ ├── toolbox.txt │ │ │ │ └── fgraph.txt │ │ │ ├── misc │ │ │ │ └── pkl_utils.txt │ │ │ ├── sparse │ │ │ │ └── sandbox.txt │ │ │ ├── index.txt │ │ │ └── typed_list.txt │ │ ├── nextml2015 │ │ │ ├── theano_grad.png │ │ │ └── Makefile │ │ ├── omlw2014 │ │ │ ├── road-runner-1.jpg │ │ │ ├── omlw_presentation.pdf │ │ │ ├── pr_conv_gemm_profile.png │ │ │ ├── Makefile │ │ │ └── logreg.py │ │ ├── hpcs2011_tutorial │ │ │ ├── pics │ │ │ │ ├── mlp.pdf │ │ │ │ ├── mlp.png │ │ │ │ ├── conv.pdf │ │ │ │ ├── conv.png │ │ │ │ ├── pipeline.odg │ │ │ │ ├── pipeline.pdf │ │ │ │ ├── pipeline.png │ │ │ │ ├── CPU_VS_GPU.png │ │ │ │ ├── apply_node.odg │ │ │ │ ├── apply_node.pdf │ │ │ │ ├── apply_node.png │ │ │ │ ├── f_optimized.png │ │ │ │ ├── theano_logo.png │ │ │ │ ├── f_unoptimized.png │ │ │ │ ├── bloc_repartition.png │ │ │ │ ├── multiple_graph.pdf │ │ │ │ ├── multiple_graph.png │ │ │ │ ├── pycuda-logo-crop.pdf │ │ │ │ ├── grid_block_thread.png │ │ │ │ ├── lisabook_logo_text_3.png │ │ │ │ ├── logreg_pydotprint_train.png │ │ │ │ ├── logreg_pydotprint_predic.png │ │ │ │ ├── logreg_pydotprint_prediction.png │ │ │ │ └── Caffeine_Machine_no_background_red.png │ │ │ ├── simple_example.py │ │ │ ├── Makefile │ │ │ ├── pycuda_simple.py │ │ │ ├── scan_pow.py │ │ │ ├── double_op.py │ │ │ └── scan_poly.py │ │ ├── sandbox │ │ │ ├── function.txt │ │ │ ├── tensoroptools.txt │ │ │ ├── functional.txt │ │ │ ├── compilation.txt │ │ │ ├── index.txt │ │ │ ├── index2.txt │ │ │ ├── fg.txt │ │ │ ├── performance.txt │ │ │ ├── software.txt │ │ │ └── max_gotcha.txt │ │ ├── crei2013 │ │ │ ├── pics │ │ │ │ ├── logreg_pydotprint_predict.png │ │ │ │ ├── logreg_pydotprint_train.png │ │ │ │ └── logreg_pydotprint_prediction.png │ │ │ ├── scan_pow.py │ │ │ ├── scan_poly.py │ │ │ ├── ifelse_switch.py │ │ │ ├── gpundarray.txt │ │ │ └── logreg.py │ │ ├── cifarSC2011 │ │ │ ├── pics │ │ │ │ ├── logreg_pydotprint_train.png │ │ │ │ ├── logreg_pydotprint_predict.png │ │ │ │ └── 
logreg_pydotprint_prediction.png │ │ │ ├── gpundarray.txt │ │ │ └── boot_camp_overview.txt │ │ ├── internal │ │ │ ├── dev_start_guide.txt │ │ │ └── index.txt │ │ ├── proposals │ │ │ ├── index.txt │ │ │ ├── tensor_attributes.txt │ │ │ ├── mongodb_cache.txt │ │ │ ├── premerge.txt │ │ │ ├── intermediate_language.txt │ │ │ ├── dp_optimization.txt │ │ │ └── opt_patterns2.txt │ │ ├── developer │ │ │ ├── index.txt │ │ │ └── tensor.txt │ │ ├── nice_quotes.txt │ │ ├── generate_dtype_tensor_table.py │ │ ├── theano_installer_for_anaconda.bat │ │ ├── acknowledgement.txt │ │ └── core_development_guide.txt │ ├── setup.cfg │ ├── benchmark │ │ ├── autoencoder │ │ │ ├── Makefile │ │ │ └── aa_numpy.py │ │ ├── convolution │ │ │ ├── bench.sh │ │ │ ├── scipy_conv.py │ │ │ ├── opencv.py │ │ │ └── conv2d.py │ │ └── README │ ├── MANIFEST.in │ ├── NEWS_DEV.txt │ ├── bin │ │ └── theano-test │ ├── Theano.sln │ ├── DESCRIPTION.txt │ └── README.txt ├── run_vis_BN.sh ├── sample_structures6.txt ├── run_bo_ENAS12.sh ├── run_bo_BN.sh ├── run_bo_ENAS.sh ├── compute_score.R └── evaluate_BN.py ├── compute_score.R └── LICENSE /software/enas/src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /software/enas/src/ptb/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /software/enas/src/cifar10/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/LICENSE.txt: -------------------------------------------------------------------------------- 1 | doc/LICENSE.txt -------------------------------------------------------------------------------- 
/bayesian_optimization/Theano-master/theano/misc/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/compile/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/d3viz/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/gof/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/misc/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/scalar/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sparse/tests/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tensor/signal/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tensor/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/compile/sandbox/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/cuda/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/scan_module/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sparse/sandbox/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tensor/nnet/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/typed_list/tests/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/gpuarray/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/linalg/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tensor/signal/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/requirement-rtd.txt: -------------------------------------------------------------------------------- 1 | sphinx>=1.3.0 2 | nose>=1.3.0 3 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/NEWS.txt: -------------------------------------------------------------------------------- 1 | .. _NEWS: 2 | 3 | .. 
include:: ../NEWS.txt 4 | -------------------------------------------------------------------------------- /software/enas/img/enas_rnn_cell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/software/enas/img/enas_rnn_cell.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/bcast.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/bcast.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/symbolic_graphs.txt: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | This page has been moved. Please refer to: :ref:`graphstructures`. 4 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/lstm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/lstm.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/extending_theano.txt: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | This page has been moved. Please refer to: :ref:`extending_theano`. 
4 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/apply.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/tutorial/apply.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/extending_theano_c.txt: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | This page has been moved. Please refer to: :ref:`extending_theano_c`. 4 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/setup.cfg: -------------------------------------------------------------------------------- 1 | [nosetest] 2 | match=^test 3 | nocapture=1 4 | 5 | [flake8] 6 | ignore=E501,E123,E133,FI12,FI14,FI15,FI50,FI51,FI53 7 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/extending/apply.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/extending/apply.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/talk2010.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/talk2010.gif -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/talk2010.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/talk2010.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/Elman_srnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/Elman_srnn.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/blocksparse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/blocksparse.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/snake_theta2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/snake_theta2.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/theano_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/theano_logo.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/dlogistic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/tutorial/dlogistic.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/logistic.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/tutorial/logistic.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/pics/d3viz.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/tutorial/pics/d3viz.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/bcast.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/library/tensor/bcast.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/d3viz/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from theano.d3viz.d3viz import d3viz, d3write 3 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/lstm_memorycell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/lstm_memorycell.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/nextml2015/theano_grad.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/nextml2015/theano_grad.png 
-------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/omlw2014/road-runner-1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/omlw2014/road-runner-1.jpg -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/mlp.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/mlp.pdf -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/mlp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/mlp.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/theano_logo-200x67.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/theano_logo-200x67.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/d3viz/examples/mlp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/library/d3viz/examples/mlp.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/omlw2014/omlw_presentation.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/omlw2014/omlw_presentation.pdf -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/sandbox/function.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _function: 3 | 4 | ================== 5 | function interface 6 | ================== 7 | 8 | WRITEME 9 | 10 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/conv.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/conv.pdf -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/conv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/conv.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/theano-theta-117x117.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/theano-theta-117x117.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/d3viz/examples/mlp2.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/library/d3viz/examples/mlp2.pdf -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/d3viz/examples/mlp2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/library/d3viz/examples/mlp2.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/omlw2014/pr_conv_gemm_profile.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/omlw2014/pr_conv_gemm_profile.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/scalar/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | 3 | from .basic import * 4 | 5 | from .basic_scipy import * 6 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/benchmark/autoencoder/Makefile: -------------------------------------------------------------------------------- 1 | aa.x : aa.cc 2 | g++ -O3 -ffast-math aa.cc -o aa.x -L${PUB_PREFIX}/lib -lgsl ${THEANO_BLAS_LDFLAGS} 3 | 4 | clean : 5 | rm aa.x 6 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/pipeline.odg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/pipeline.odg 
-------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/pipeline.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/pipeline.pdf -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/pipeline.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/gof/tests/test_fg_old_crash.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/theano/gof/tests/test_fg_old_crash.pkl -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tensor/tests/shape_opt_cycle.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/theano/tensor/tests/shape_opt_cycle.pkl -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/extending/pics/symbolic_graph_opt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/extending/pics/symbolic_graph_opt.png -------------------------------------------------------------------------------- 
/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/CPU_VS_GPU.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/CPU_VS_GPU.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/apply_node.odg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/apply_node.odg -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/apply_node.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/apply_node.pdf -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/apply_node.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/apply_node.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/f_optimized.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/f_optimized.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/theano_logo.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/theano_logo.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/theano_logo_allblue_200x46.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/theano_logo_allblue_200x46.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/theano_logo_allblue_200x54.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/theano_logo_allblue_200x54.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/theano_logo_allblue_350x95.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/theano_logo_allblue_350x95.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/theano_logo_allblue_63x21.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/theano_logo_allblue_63x21.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/nnet/sigmoid_prec.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/library/tensor/nnet/sigmoid_prec.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/extending/pics/symbolic_graph_unopt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/extending/pics/symbolic_graph_unopt.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/f_unoptimized.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/f_unoptimized.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/images/theano_bw_parens_transparent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/images/theano_bw_parens_transparent.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/d3viz/index_files/index_10_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/library/d3viz/index_files/index_10_0.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/d3viz/index_files/index_11_0.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/library/d3viz/index_files/index_11_0.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/d3viz/index_files/index_24_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/library/d3viz/index_files/index_24_0.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/d3viz/index_files/index_25_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/library/d3viz/index_files/index_25_0.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/crei2013/pics/logreg_pydotprint_predict.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/crei2013/pics/logreg_pydotprint_predict.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/crei2013/pics/logreg_pydotprint_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/crei2013/pics/logreg_pydotprint_train.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/bloc_repartition.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/bloc_repartition.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/multiple_graph.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/multiple_graph.pdf -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/multiple_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/multiple_graph.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/pycuda-logo-crop.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/pycuda-logo-crop.pdf -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/pics/logreg_pydotprint_predict.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/tutorial/pics/logreg_pydotprint_predict.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/pics/logreg_pydotprint_train.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/tutorial/pics/logreg_pydotprint_train.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/typed_list/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from .type import TypedListType 3 | from .basic import * 4 | from . import opt 5 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/cifarSC2011/pics/logreg_pydotprint_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/cifarSC2011/pics/logreg_pydotprint_train.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/grid_block_thread.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/grid_block_thread.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/cifarSC2011/pics/logreg_pydotprint_predict.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/cifarSC2011/pics/logreg_pydotprint_predict.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/crei2013/pics/logreg_pydotprint_prediction.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/crei2013/pics/logreg_pydotprint_prediction.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/lisabook_logo_text_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/lisabook_logo_text_3.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/nextml2015/Makefile: -------------------------------------------------------------------------------- 1 | presentation.pdf: presentation.tex 2 | pdflatex presentation.tex 3 | pdflatex presentation.tex 4 | 5 | clean: 6 | rm *.toc *.snm *.aux *.log *.nav *.out *.vrb 7 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/sandbox/tensoroptools.txt: -------------------------------------------------------------------------------- 1 | 2 | .. 
_tensoroptools: 3 | 4 | ================ 5 | Tensor Op Tools 6 | ================ 7 | 8 | WRITEME - describe how to use Elemwise here 9 | 10 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/pics/logreg_pydotprint_prediction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/tutorial/pics/logreg_pydotprint_prediction.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/cifarSC2011/pics/logreg_pydotprint_prediction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/cifarSC2011/pics/logreg_pydotprint_prediction.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/logreg_pydotprint_train.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/logreg_pydotprint_train.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/logreg_pydotprint_predic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/logreg_pydotprint_predic.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/logreg_pydotprint_prediction.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/logreg_pydotprint_prediction.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tests/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | 3 | try: 4 | from theano.tests.main import main, TheanoNoseTester 5 | except ImportError: 6 | pass 7 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/Caffeine_Machine_no_background_red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/muhanzhang/D-VAE/HEAD/bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pics/Caffeine_Machine_no_background_red.png -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/sandbox/functional.txt: -------------------------------------------------------------------------------- 1 | 2 | ========== 3 | Functional 4 | ========== 5 | 6 | Want to know about Theano's `function design 7 | `? 
8 | -------------------------------------------------------------------------------- /bayesian_optimization/run_vis_BN.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python bo.py \ 4 | --data-name="asia_200k" \ 5 | --save-appendix="DVAE" \ 6 | --checkpoint=100 \ 7 | --vis-2d \ 8 | --res-dir="vis_results/" \ 9 | 10 | #--save-appendix="SVAE" \ 11 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/internal/dev_start_guide.txt: -------------------------------------------------------------------------------- 1 | .. _old_dev_start_guide: 2 | 3 | ============================ 4 | Developer Start Guide MOVED! 5 | ============================ 6 | 7 | 8 | The developer start guide :ref:`moved `. 9 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/benchmark/convolution/bench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python opencv.py $@ 4 | python conv2d.py $@ 5 | python scipy_conv.py $@ 6 | 7 | echo "WARNING the mode is valid for theano and scipy, but opencv use the mode same! Can opencv do the mode full?" 
8 | -------------------------------------------------------------------------------- /bayesian_optimization/sample_structures6.txt: -------------------------------------------------------------------------------- 1 | [[1], [4, 0], [5, 0, 0], [0, 1, 1, 1], [2, 1, 1, 1, 1], [3, 0, 0, 0, 0, 1]] 2 | [[5], [3, 1], [2, 1, 1], [3, 1, 1, 0], [5, 0, 0, 0, 1], [2, 0, 1, 1, 1, 0]] 3 | [[1], [1, 1], [5, 1, 0], [4, 1, 0, 0], [4, 0, 1, 1, 1], [2, 1, 0, 1, 0, 1]] 4 | -------------------------------------------------------------------------------- /software/enas/src/cifar10/sample_structures6.txt: -------------------------------------------------------------------------------- 1 | [[1], [4, 0], [5, 0, 0], [0, 1, 1, 1], [2, 1, 1, 1, 1], [3, 0, 0, 0, 0, 1]] 2 | [[5], [3, 1], [2, 1, 1], [3, 1, 1, 0], [5, 0, 0, 0, 1], [2, 0, 1, 1, 1, 0]] 3 | [[1], [1, 1], [5, 1, 0], [4, 1, 0, 0], [4, 0, 1, 1, 1], [2, 1, 0, 1, 0, 1]] 4 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/compile/ops.txt: -------------------------------------------------------------------------------- 1 | ================================================== 2 | :mod:`ops` -- Some Common Ops and extra Ops stuff 3 | ================================================== 4 | 5 | .. automodule:: theano.compile.ops 6 | :members: 7 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/scalar/index.txt: -------------------------------------------------------------------------------- 1 | 2 | .. 
_libdoc_scalar: 3 | 4 | ============================================================== 5 | :mod:`scalar` -- Symbolic Scalar Types, Ops [doc TODO] 6 | ============================================================== 7 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/conv.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import sys 3 | print("DEPRECATION: theano.sandbox.conv no longer provides conv. " 4 | "They have been moved to theano.tensor.nnet.conv", file=sys.stderr) 5 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/proposals/index.txt: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | .. _proposals: 4 | 5 | ================================== 6 | Proposals for new/revised features 7 | ================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 1 11 | :glob: 12 | 13 | * 14 | 15 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/sandbox/compilation.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _compilation: 3 | 4 | ======================= 5 | Compilation and Linking 6 | ======================= 7 | 8 | .. index:: 9 | single: Linker 10 | 11 | .. 
_linker: 12 | 13 | Linker 14 | ====== 15 | 16 | WRITEME 17 | 18 | 19 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/linalg/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from .ops import (cholesky, matrix_inverse, solve, 3 | diag, extract_diag, alloc_diag, 4 | det, psd, eig, eigh, eigvalsh, 5 | trace, spectral_radius_bound) 6 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/sandbox/index.txt: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | ========================================================= 4 | Sandbox, this documentation may or may not be out-of-date 5 | ========================================================= 6 | 7 | .. toctree:: 8 | :glob: 9 | 10 | * 11 | 12 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sparse/tests/test_type.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | 3 | 4 | def test_sparse_type(): 5 | import theano.sparse 6 | # They need to be available even if scipy is not available. 7 | assert hasattr(theano.sparse, "SparseType") 8 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/benchmark/README: -------------------------------------------------------------------------------- 1 | The code written by James Bergstra in this directory has been superseded 2 | by the scipy benchmarking code, which is in another repository: 3 | 4 | https://github.com/jaberg/DeepLearningBenchmarks 5 | 6 | The remainder of the code in this directory remains undocumented. 
7 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/internal/index.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _internal: 3 | 4 | ====================== 5 | Internal Documentation 6 | ====================== 7 | 8 | .. toctree:: 9 | :maxdepth: 2 10 | 11 | release 12 | dev_start_guide 13 | metadocumentation 14 | python 15 | how_to_release 16 | -------------------------------------------------------------------------------- /bayesian_optimization/run_bo_ENAS12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="$(pwd)" 4 | 5 | python bo.py \ 6 | --data-name final_structures12 \ 7 | --save-appendix DVAE \ 8 | --checkpoint 800 \ 9 | --res-dir="ENAS12_results/" \ 10 | --BO-rounds 10 \ 11 | --BO-batch-size 50 \ 12 | --random-as-test \ 13 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/gpuarray/type.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_gpuarray_type: 2 | 3 | =================================================== 4 | :mod:`theano.sandbox.gpuarray.type` -- Type classes 5 | =================================================== 6 | 7 | .. automodule:: theano.sandbox.gpuarray.type 8 | :members: 9 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/developer/index.txt: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | .. _developer: 4 | 5 | ============================================== 6 | Theano Design and Implementation Documentation 7 | ============================================== 8 | 9 | 10 | .. 
toctree:: 11 | :maxdepth: 2 12 | 13 | tensor 14 | scan 15 | compat 16 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/MANIFEST.in: -------------------------------------------------------------------------------- 1 | global-include *.txt 2 | global-include *.c 3 | global-include *.cu 4 | global-include *.cuh 5 | global-include *.cpp 6 | global-include *.h 7 | global-include *.sh 8 | global-include *.pkl 9 | recursive-include docs 10 | include bin/theano-cache 11 | include bin/theano-nose 12 | include bin/theano-test 13 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/version.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | try: 3 | from theano.generated_version import * # noqa 4 | except ImportError: 5 | short_version = 'unknown' 6 | version = 'unknown' 7 | git_revision = 'unknown' 8 | full_version = 'unknown' 9 | release = False 10 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/sandbox/index2.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _advanced: 3 | 4 | ==================================== 5 | Advanced Topics (under construction) 6 | ==================================== 7 | 8 | .. 
toctree:: 9 | :maxdepth: 2 10 | 11 | fg 12 | compilation 13 | ccodegen 14 | function 15 | debugging_with_stepmode 16 | 17 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/omlw2014/Makefile: -------------------------------------------------------------------------------- 1 | all: presentation.pdf sharing.pdf 2 | 3 | clean: 4 | rm -f pygpu_ndarray.so core.* *.o *~ 5 | 6 | cleantmp: 7 | rm -f core.* *.o *~ 8 | 9 | presentation.pdf: presentation.tex 10 | pdflatex presentation 11 | pdflatex presentation 12 | 13 | sharing.pdf: sharing.tex 14 | pdflatex sharing 15 | pdflatex sharing 16 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/sandbox/fg.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _fgraph: 3 | 4 | ============= 5 | FunctionGraph 6 | ============= 7 | 8 | TODO: clean up/update the doc/check if complete 9 | WRITEME 10 | 11 | .. autoclass:: theano.gof.fg.FunctionGraph 12 | 13 | .. _fgraphfeature: 14 | 15 | Feature 16 | ======= 17 | 18 | .. autoclass:: theano.gof.toolbox.Feature 19 | :members: 20 | -------------------------------------------------------------------------------- /bayesian_optimization/run_bo_BN.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python bo.py \ 4 | --data-name="asia_200k" \ 5 | --save-appendix="DVAE" \ 6 | --checkpoint=100 \ 7 | --res-dir="BN_results/" \ 8 | --random-as-test \ 9 | --random-baseline \ 10 | 11 | #--save-appendix="SVAE" \ 12 | #--save-appendix="GraphRNN" \ 13 | #--save-appendix="GCN" \ 14 | #--save-appendix="DeepGMG" \ 15 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/NEWS_DEV.txt: -------------------------------------------------------------------------------- 1 | .. 
_NEWS_DEV: 2 | 3 | =================== 4 | DRAFT Release Notes 5 | =================== 6 | 7 | git log -p rel-0.7... |grep Merge|grep '#[0123456789]' |cut -f 8 -d ' ' | sed 's\#\* https://github.com/Theano/Theano/pull/\' 8 | 9 | git shortlog -sn rel-0.7.. 10 | 11 | 12 | 13 | Theano Development version 14 | ========================== 15 | 16 | NEWS.txt: 17 | 18 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/gpuarray/extra.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_gpuarray_extra: 2 | 3 | ================= 4 | Utility functions 5 | ================= 6 | 7 | Optimisation 8 | ------------ 9 | 10 | .. automodule:: theano.sandbox.gpuarray.opt_util 11 | :members: 12 | 13 | Kernel generation 14 | ----------------- 15 | 16 | .. automodule:: theano.sandbox.gpuarray.kernel_codegen 17 | :members: 18 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/nnet/bn.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_tensor_nnet_bn: 2 | 3 | ================================ 4 | :mod:`bn` -- Batch Normalization 5 | ================================ 6 | 7 | .. module:: tensor.nnet.bn 8 | :platform: Unix, Windows 9 | :synopsis: Batch Normalization 10 | .. moduleauthor:: LISA 11 | 12 | 13 | .. autofunction:: theano.tensor.nnet.bn.batch_normalization 14 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/proposals/tensor_attributes.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | ======================== 4 | Add tensor attributes 5 | ======================== 6 | 7 | 8 | Size, shape, psd, symmetric, triangular, contiguous. 
9 | Add these attributes to the TensorType with the option always that they be 10 | 'unknown'. 11 | Add attributes that are useful for optimizations, or useful for code generation. 12 | 13 | 14 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/cuda/tests/CudaNdarray.pkl: -------------------------------------------------------------------------------- 1 | ctheano.sandbox.cuda.type 2 | CudaNdarray_unpickler 3 | p1 4 | (cnumpy.core.multiarray 5 | _reconstruct 6 | p2 7 | (cnumpy 8 | ndarray 9 | p3 10 | (I0 11 | tS'b' 12 | tRp4 13 | (I1 14 | (I1 15 | tcnumpy 16 | dtype 17 | p5 18 | (S'f4' 19 | I0 20 | I1 21 | tRp6 22 | (I3 23 | S'<' 24 | NNNI-1 25 | I-1 26 | I0 27 | tbI00 28 | S'\x00\x00(\xc2' 29 | tbtR. -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/opt.txt: -------------------------------------------------------------------------------- 1 | =================================================================== 2 | :mod:`tensor.opt` -- Tensor Optimizations 3 | =================================================================== 4 | 5 | .. module:: tensor.opt 6 | :platform: Unix, Windows 7 | :synopsis: Tensor Optimizations 8 | .. moduleauthor:: LISA 9 | 10 | .. automodule:: theano.tensor.opt 11 | :members: 12 | 13 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/neighbours.py: -------------------------------------------------------------------------------- 1 | """ 2 | Neighbours was moved into theano.tensor.nnet.neighbours. 3 | This file was created for compatibility. 
4 | """ 5 | from __future__ import absolute_import, print_function, division 6 | from theano.tensor.nnet.neighbours import (images2neibs, neibs2images, 7 | Images2Neibs) 8 | 9 | __all__ = ["images2neibs", "neibs2images", "Images2Neibs"] 10 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/proposals/mongodb_cache.txt: -------------------------------------------------------------------------------- 1 | 2 | ================= 3 | MongoDB DLL Cache 4 | ================= 5 | 6 | 7 | In network environments (like at DIRO on NFS3), a distributed DB like mongo or couch is faster and more 8 | robust to concurrency than the $HOME/.theano. Also, a single cache could be 9 | shared by multiple users. This would result in less compilation time, for 10 | everyone, and less stale-cache problems. 11 | 12 | -------------------------------------------------------------------------------- /software/enas/src/controller.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | class Controller(object): 4 | def __init__(self, *args, **kwargs): 5 | raise NotImplementedError("Abstract method.") 6 | 7 | def _build_sample(self): 8 | raise NotImplementedError("Abstract method.") 9 | 10 | def _build_greedy(self): 11 | raise NotImplementedError("Abstract method.") 12 | 13 | def _build_trainer(self): 14 | raise NotImplementedError("Abstract method.") 15 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/profiling_example.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import numpy 3 | 4 | import theano 5 | 6 | x, y, z = theano.tensor.vectors('xyz') 7 | f = theano.function([x, y, z], [(x + y + z) * 2]) 8 | xv = numpy.random.rand(10).astype(theano.config.floatX) 9 | yv = 
numpy.random.rand(10).astype(theano.config.floatX) 10 | zv = numpy.random.rand(10).astype(theano.config.floatX) 11 | f(xv, yv, zv) 12 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/neighbours.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_neighbours: 2 | 3 | =================================================================== 4 | :mod:`sandbox.neighbours` -- Neighbours Ops 5 | =================================================================== 6 | 7 | .. module:: sandbox.neighbours 8 | :platform: Unix, Windows 9 | :synopsis: Neighbours Ops 10 | .. moduleauthor:: LISA 11 | 12 | :ref:`Moved ` 13 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/gpuarray/tests/test_neighbours.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from theano.tensor.nnet.tests import test_neighbours 3 | 4 | from .config import mode_with_gpu 5 | 6 | from ..neighbours import GpuImages2Neibs 7 | 8 | 9 | class T_GpuImages2Neibs(test_neighbours.T_Images2Neibs): 10 | mode = mode_with_gpu 11 | op = GpuImages2Neibs 12 | dtypes = ['int64', 'float32', 'float64'] 13 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/gof/index.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _libdoc_gof: 3 | 4 | ================================================ 5 | :mod:`gof` -- Theano Internals [doc TODO] 6 | ================================================ 7 | 8 | .. module:: gof 9 | :platform: Unix, Windows 10 | :synopsis: Theano Internals 11 | .. moduleauthor:: LISA 12 | 13 | .. 
toctree:: 14 | :maxdepth: 1 15 | 16 | graph 17 | fgraph 18 | toolbox 19 | type 20 | utils 21 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/cuda/index.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _libdoc_sandbox_cuda: 3 | 4 | =========================================== 5 | :mod:`sandbox.cuda` -- The CUDA GPU backend 6 | =========================================== 7 | 8 | .. module:: sandbox.cuda 9 | :platform: Unix, Windows 10 | :synopsis: Code for GPU programming 11 | .. moduleauthor:: LISA 12 | 13 | .. toctree:: 14 | :maxdepth: 1 15 | 16 | op 17 | var 18 | type 19 | dnn 20 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/developer/tensor.txt: -------------------------------------------------------------------------------- 1 | .. _tensor: 2 | 3 | ======= 4 | Tensor 5 | ======= 6 | 7 | This file describes the design of theano.tensor. 8 | 9 | Elemwise grad and R_op 10 | ====================== 11 | 12 | Here's another straightforward example, though a bit more elaborate 13 | than adding two numbers together. Let's say that you want to compute 14 | the logistic curve, which is given by: 15 | 16 | .. math:: 17 | 18 | s(x) = \frac{1}{1 + e^{-x}} 19 | 20 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/gof/graph.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_gof_graph: 2 | 3 | ============================================== 4 | :mod:`graph` -- Interface for the Theano graph 5 | ============================================== 6 | 7 | .. module:: graph 8 | :platform: Unix, Windows 9 | :synopsis: Interface for types of symbolic variables 10 | .. 
moduleauthor:: LISA 11 | 12 | --------- 13 | Reference 14 | --------- 15 | 16 | .. automodule:: theano.gof.graph 17 | :members: 18 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/gof/type.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_gof_type: 2 | 3 | ================================================ 4 | :mod:`type` -- Interface for types of variables 5 | ================================================ 6 | 7 | .. module:: type 8 | :platform: Unix, Windows 9 | :synopsis: Interface for types of symbolic variables 10 | .. moduleauthor:: LISA 11 | 12 | --------- 13 | Reference 14 | --------- 15 | 16 | .. automodule:: theano.gof.type 17 | :members: 18 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/utils.txt: -------------------------------------------------------------------------------- 1 | =================================================================== 2 | :mod:`tensor.utils` -- Tensor Utils 3 | =================================================================== 4 | 5 | .. testsetup:: 6 | 7 | from theano.tensor.utils import * 8 | 9 | .. module:: tensor.utils 10 | :platform: Unix, Windows 11 | :synopsis: Tensor Utils 12 | .. moduleauthor:: LISA 13 | 14 | .. 
automodule:: theano.tensor.utils 15 | :members: 16 | 17 | -------------------------------------------------------------------------------- /bayesian_optimization/run_bo_ENAS.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="$(pwd)" 4 | 5 | python bo.py \ 6 | --data-name final_structures6 \ 7 | --save-appendix DVAE \ 8 | --checkpoint 300 \ 9 | --res-dir="ENAS_results/" \ 10 | --BO-rounds 10 \ 11 | --BO-batch-size 50 \ 12 | --random-as-test \ 13 | --random-baseline \ 14 | 15 | #--save-appendix SVAE \ 16 | #--save-appendix GraphRNN \ 17 | #--save-appendix GCN \ 18 | #--save-appendix DeepGMG \ 19 | #--save-appendix DVAE_fast \ 20 | 21 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/rng_mrg.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_rng_mrg: 2 | 3 | =================================================================== 4 | :mod:`sandbox.rng_mrg` -- MRG random number generator 5 | =================================================================== 6 | 7 | .. module:: sandbox.rng_mrg 8 | :platform: Unix, Windows 9 | :synopsis: MRG random number generator 10 | .. moduleauthor:: LISA 11 | 12 | API 13 | === 14 | 15 | .. 
automodule:: theano.sandbox.rng_mrg 16 | :members: 17 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/gpuarray/tests/GpuArray.pkl: -------------------------------------------------------------------------------- 1 | ctheano.sandbox.gpuarray.type 2 | GpuArray_unpickler 3 | p0 4 | (cnumpy.core.multiarray 5 | _reconstruct 6 | p1 7 | (cnumpy 8 | ndarray 9 | p2 10 | (I0 11 | tp3 12 | S'b' 13 | p4 14 | tp5 15 | Rp6 16 | (I1 17 | (I1 18 | tp7 19 | cnumpy 20 | dtype 21 | p8 22 | (S'f4' 23 | p9 24 | I0 25 | I1 26 | tp10 27 | Rp11 28 | (I3 29 | S'<' 30 | p12 31 | NNNI-1 32 | I-1 33 | I0 34 | tp13 35 | bI00 36 | S'\x00\x00(\xc2' 37 | p14 38 | tp15 39 | bNtp16 40 | Rp17 41 | . -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/scalar/tests/test_div_no_future.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import theano 3 | import theano.tensor as T 4 | import unittest 5 | 6 | 7 | class test_FutureDiv(unittest.TestCase): 8 | 9 | def test_divide_floats(self): 10 | a = T.dscalar('a') 11 | b = T.dscalar('b') 12 | c = theano.function([a, b], b / a) 13 | d = theano.function([a, b], b // a) 14 | assert c(6, 3) == 0.5 15 | assert d(6, 3) == 0.0 16 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/index.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _libdoc_sandbox: 3 | 4 | ============================================================== 5 | :mod:`sandbox` -- Experimental Code 6 | ============================================================== 7 | 8 | .. module:: sandbox 9 | :platform: Unix, Windows 10 | :synopsis: Experimental code 11 | .. moduleauthor:: LISA 12 | 13 | .. 
toctree:: 14 | :maxdepth: 1 15 | 16 | cuda/index 17 | gpuarray/index 18 | linalg 19 | neighbours 20 | rng_mrg 21 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/extra_ops.txt: -------------------------------------------------------------------------------- 1 | =================================================================== 2 | :mod:`tensor.extra_ops` -- Tensor Extra Ops 3 | =================================================================== 4 | 5 | .. testsetup:: * 6 | 7 | from theano.tensor.extra_ops import * 8 | 9 | .. module:: tensor.extra_ops 10 | :platform: Unix, Windows 11 | :synopsis: Tensor Extra Ops 12 | .. moduleauthor:: LISA 13 | 14 | .. automodule:: theano.tensor.extra_ops 15 | :members: 16 | 17 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/scalar/tests/test_div_future.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | 3 | import theano 4 | import theano.tensor as T 5 | import unittest 6 | 7 | 8 | class test_FutureDiv(unittest.TestCase): 9 | 10 | def test_divide_floats(self): 11 | a = T.dscalar('a') 12 | b = T.dscalar('b') 13 | c = theano.function([a, b], b / a) 14 | d = theano.function([a, b], b // a) 15 | assert c(6, 3) == 0.5 16 | assert d(6, 3) == 0.0 17 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/gpuarray/index.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _libdoc_gpuarray: 3 | 4 | ======================================================= 5 | :mod:`theano.sandbox.gpuarray` -- The (new) GPU backend 6 | ======================================================= 7 | 8 | .. 
module:: theano.sandbox.gpuarray 9 | :platform: Unix, Windows 10 | :synopsis: Code for GPU programming (new) 11 | .. moduleauthor:: MILA 12 | 13 | .. toctree:: 14 | :maxdepth: 1 15 | 16 | op 17 | dnn 18 | type 19 | extra 20 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/adding_solution_1.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Theano tutorial 3 | # Solution to Exercise in section 'Baby Steps - Algebra' 4 | 5 | from __future__ import absolute_import, print_function, division 6 | import theano 7 | a = theano.tensor.vector() # declare variable 8 | b = theano.tensor.vector() # declare variable 9 | out = a ** 2 + b ** 2 + 2 * a * b # build symbolic expression 10 | f = theano.function([a, b], out) # compile function 11 | print(f([1, 2], [4, 5])) # prints [ 25. 49.] 12 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/gpuarray/fp16_help.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | 3 | 4 | def work_dtype(dtype): 5 | if dtype == 'float16': 6 | return 'float32' 7 | else: 8 | return dtype 9 | 10 | 11 | def load_w(dtype): 12 | if dtype == 'float16': 13 | return '__half2float' 14 | else: 15 | return '' 16 | 17 | 18 | def write_w(dtype): 19 | if dtype == 'float16': 20 | return '__float2half_rn' 21 | else: 22 | return '' 23 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/nnet/blocksparse.txt: -------------------------------------------------------------------------------- 1 | .. 
_libdoc_blocksparse: 2 | 3 | =============================================================================== 4 | :mod:`blocksparse` -- Block sparse dot operations (gemv and outer) 5 | =============================================================================== 6 | 7 | .. module:: tensor.nnet.blocksparse 8 | :platform: Unix, Windows 9 | :synopsis: Block sparse dot 10 | .. moduleauthor:: LISA 11 | 12 | 13 | .. automodule:: theano.tensor.nnet.blocksparse 14 | :members: 15 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/proposals/premerge.txt: -------------------------------------------------------------------------------- 1 | 2 | Proactive Merging 3 | ================= 4 | 5 | 6 | Merge is done now as an optimization. 7 | But if Merging was done at graph construction time, things like #476 would work. 8 | Additionally, memo-izing at graph construction time would make it possible to 9 | define recursive formula with recursive python functions (e.g. Fibonacci). 10 | Currently the merge optimization would make the Fibonacci series linear, but the 11 | size of the program used to express the program would be exponential. 12 | 13 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/compile/tests/test_function_name.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | 3 | import unittest 4 | import os 5 | import re 6 | 7 | import theano 8 | from theano import tensor 9 | 10 | 11 | class FunctionName(unittest.TestCase): 12 | def test_function_name(self): 13 | x = tensor.vector('x') 14 | func = theano.function([x], x + 1.) 
15 | 16 | regex = re.compile(os.path.basename('.*test_function_name.pyc?:14')) 17 | assert(regex.match(func.name) is not None) 18 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/simple_example.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import theano 3 | a = theano.tensor.vector("a") # declare variable 4 | b = a + a**10 # build symbolic expression 5 | f = theano.function([a], b) # compile function 6 | print(f([0,1,2])) 7 | # prints `array([0,2,1026])` 8 | 9 | theano.printing.pydotprint(b, outfile="pics/f_unoptimized.png", var_with_name_simple=True) 10 | theano.printing.pydotprint(f, outfile="pics/f_optimized.png", var_with_name_simple=True) 11 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/Makefile: -------------------------------------------------------------------------------- 1 | all: presentation.pdf 2 | 3 | presentation.pdf: presentation.tex pics/f_optimized.png pics/logreg_pydotprint_prediction.png 4 | # pics/f_unoptimized.png pics/logreg_pydotprint_predic.png pics/logreg_pydotprint_train.png 5 | pdflatex presentation.tex 6 | 7 | pics/f_optimized.png: simple_example.py 8 | python simple_example.py 9 | 10 | pics/logreg_pydotprint_prediction.png: logreg_example.py 11 | python logreg_example.py 12 | #pics/f_unoptimized.png: simple_example.py 13 | # python simple_example.py 14 | 15 | -------------------------------------------------------------------------------- /compute_score.R: -------------------------------------------------------------------------------- 1 | # compute BIC score of a Bayesnet 2 | # input: adjacency matrix, dataset 3 | # output: BIC score 4 | 5 | library("bnlearn", quietly = TRUE, warn.conflicts = FALSE) 6 | data("asia") 7 | args = 
commandArgs(trailingOnly = TRUE) 8 | # print(args[1]) 9 | input.file = args[1] 10 | output.file = args[2] 11 | ad.mat = as.matrix(read.table(input.file, sep = " ", col.names = names(asia), row.names = names(asia))) 12 | # print(ad.mat) 13 | # print(dim(ad.mat)) 14 | net = empty.graph(names(asia)) 15 | amat(net) <- ad.mat 16 | # print(net) 17 | s = score(net, asia) 18 | write(s, file = output.file) -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/gof/utils.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_gof_utils: 2 | 3 | ========================================================== 4 | :mod:`utils` -- Utilities functions operating on the graph 5 | ========================================================== 6 | 7 | .. testsetup:: * 8 | 9 | from theano.gof.utils import * 10 | 11 | .. module:: utils 12 | :platform: Unix, Windows 13 | :synopsis: Utilities functions operating on the graph 14 | .. moduleauthor:: LISA 15 | 16 | --------- 17 | Reference 18 | --------- 19 | 20 | .. automodule:: theano.gof.utils 21 | :members: 22 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/linalg.txt: -------------------------------------------------------------------------------- 1 | .. ../../../../theano/sandbox/linalg/ops.py 2 | .. ../../../../theano/sandbox/linalg 3 | 4 | .. _libdoc_sandbox_linalg: 5 | 6 | =================================================================== 7 | :mod:`sandbox.linalg` -- Linear Algebra Ops 8 | =================================================================== 9 | 10 | .. module:: sandbox.linalg 11 | :platform: Unix, Windows 12 | :synopsis: Linear Algebra Ops 13 | .. moduleauthor:: LISA 14 | 15 | API 16 | === 17 | 18 | .. 
automodule:: theano.sandbox.linalg.ops 19 | :members: 20 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/proposals/intermediate_language.txt: -------------------------------------------------------------------------------- 1 | 2 | ===================== 3 | Intermediate Language 4 | ===================== 5 | 6 | 7 | It would be nice to be able to use Theano from other languages. 8 | This requires two things: a way to communicate the expression to the theano 9 | compiler, and a way to pass data to and from the compiled function. 10 | 11 | One way to do this would be define a textual representation of theano graphs. 12 | A Scheme-like language seems appropriate. Perhaps just scheme would be 13 | appropriate. 14 | 15 | 16 | How to pass shared variables? 17 | 18 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/logistic.gp: -------------------------------------------------------------------------------- 1 | set terminal svg font "Bitstream Vera Sans,10" size 300,200 2 | set output "logistic.svg" 3 | 4 | set xrange [-6:6] 5 | set xzeroaxis linetype -1 6 | set yzeroaxis linetype -1 7 | set xtics axis nomirror 8 | set ytics axis nomirror 0,0.5,1 9 | set key off 10 | set grid 11 | set border 1 12 | 13 | set samples 400 14 | 15 | plot 1/(1 + exp(-x)) with line linetype rgbcolor "blue" linewidth 2 16 | 17 | set ytics axis nomirror 0,0.25 18 | set output "dlogistic.svg" 19 | plot 1/(1 + exp(-x)) * (1 - 1/(1 + exp(-x))) with line linetype rgbcolor "blue" linewidth 2 20 | -------------------------------------------------------------------------------- /bayesian_optimization/compute_score.R: -------------------------------------------------------------------------------- 1 | # compute BIC score of a Bayesnet 2 | # input: adjacency matrix, dataset 3 | # output: BIC score 4 | 5 | library("bnlearn", quietly = TRUE, 
warn.conflicts = FALSE) 6 | data("asia") 7 | args = commandArgs(trailingOnly = TRUE) 8 | # print(args[1]) 9 | input.file = args[1] 10 | output.file = args[2] 11 | ad.mat = as.matrix(read.table(input.file, sep = " ", col.names = names(asia), row.names = names(asia))) 12 | # print(ad.mat) 13 | # print(dim(ad.mat)) 14 | net = empty.graph(names(asia)) 15 | amat(net) <- ad.mat 16 | # print(net) 17 | s = score(net, asia) 18 | write(s, file = output.file) -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/misc/pkl_utils.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _libdoc_misc: 3 | 4 | ================================================ 5 | :mod:`misc.pkl_utils` - Tools for serialization. 6 | ================================================ 7 | 8 | .. testsetup:: * 9 | 10 | from theano.misc.pkl_utils import * 11 | 12 | .. autofunction:: theano.misc.pkl_utils.dump 13 | 14 | .. autofunction:: theano.misc.pkl_utils.load 15 | 16 | .. autoclass:: theano.misc.pkl_utils.StripPickler 17 | 18 | .. autoclass:: theano.misc.pkl_utils.CompatUnpickler 19 | 20 | .. 
seealso:: 21 | 22 | :ref:`tutorial_loadsave` 23 | 24 | 25 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/gpuarray/cudnn_helper.h: -------------------------------------------------------------------------------- 1 | #ifndef CUDNN_HELPER_H 2 | #define CUDNN_HELPER_H 3 | 4 | #include 5 | 6 | #ifndef CUDNN_VERSION 7 | 8 | #define CUDNN_VERSION -1 9 | static inline int cudnnGetVersion() { 10 | return -1; 11 | } 12 | #endif 13 | 14 | #include 15 | 16 | // If needed, define element of the V4 interface in terms of elements of 17 | // previous versions 18 | #if defined(CUDNN_VERSION) && CUDNN_VERSION < 4000 19 | 20 | #define CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING 5 21 | #define CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING 3 22 | 23 | #endif 24 | 25 | #endif 26 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/compile/index.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _libdoc_compile: 3 | 4 | ============================================================== 5 | :mod:`compile` -- Transforming Expression Graphs to Functions 6 | ============================================================== 7 | 8 | .. module:: compile 9 | :platform: Unix, Windows 10 | :synopsis: transforming expression graphs to functions 11 | .. moduleauthor:: LISA 12 | 13 | .. 
toctree:: 14 | :maxdepth: 1 15 | 16 | shared 17 | function 18 | io 19 | ops 20 | mode 21 | debugmode 22 | profilemode 23 | nanguardmode 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/d3viz/css/d3-context-menu.css: -------------------------------------------------------------------------------- 1 | .d3-context-menu { 2 | position: absolute; 3 | display: none; 4 | background-color: #f2f2f2; 5 | border-radius: 4px; 6 | 7 | font-family: Arial, sans-serif; 8 | font-size: 14px; 9 | min-width: 50px; 10 | border: 1px solid #d4d4d4; 11 | 12 | z-index:1200; 13 | } 14 | 15 | .d3-context-menu ul { 16 | list-style-type: none; 17 | margin: 4px 0px; 18 | padding: 0px; 19 | cursor: default; 20 | } 21 | 22 | .d3-context-menu ul li { 23 | padding: 4px 16px; 24 | } 25 | 26 | .d3-context-menu ul li:hover { 27 | background-color: #4677f8; 28 | color: #fefefe; 29 | } 30 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/cuda/type.txt: -------------------------------------------------------------------------------- 1 | .. ../../../../theano/sandbox/cuda/type.py 2 | .. ../../../../theano/sandbox/cuda/var.py 3 | .. ../../../../theano/sandbox/cuda/ 4 | 5 | .. _libdoc_cuda_type: 6 | 7 | ====================================================================== 8 | :mod:`sandbox.cuda.type` -- The Type object for Cuda-allocated arrays 9 | ====================================================================== 10 | 11 | .. module:: sandbox.cuda.type 12 | :platform: Unix, Windows 13 | :synopsis: The Type object for CUDA-allocated arrays 14 | .. 
moduleauthor:: LISA 15 | 16 | API 17 | === 18 | 19 | 20 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/d3viz/examples/d3viz/css/d3-context-menu.css: -------------------------------------------------------------------------------- 1 | .d3-context-menu { 2 | position: absolute; 3 | display: none; 4 | background-color: #f2f2f2; 5 | border-radius: 4px; 6 | 7 | font-family: Arial, sans-serif; 8 | font-size: 14px; 9 | min-width: 50px; 10 | border: 1px solid #d4d4d4; 11 | 12 | z-index:1200; 13 | } 14 | 15 | .d3-context-menu ul { 16 | list-style-type: none; 17 | margin: 4px 0px; 18 | padding: 0px; 19 | cursor: default; 20 | } 21 | 22 | .d3-context-menu ul li { 23 | padding: 4px 16px; 24 | } 25 | 26 | .d3-context-menu ul li:hover { 27 | background-color: #4677f8; 28 | color: #fefefe; 29 | } 30 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/nlinalg.txt: -------------------------------------------------------------------------------- 1 | .. ../../../../theano/sandbox/nlinalg.py 2 | 3 | .. _libdoc_linalg: 4 | 5 | =================================================================== 6 | :mod:`tensor.nlinalg` -- Linear Algebra Ops Using Numpy 7 | =================================================================== 8 | 9 | .. module:: tensor.nlinalg 10 | :platform: Unix, Windows 11 | :synopsis: Linear Algebra Ops Using Numpy 12 | .. moduleauthor:: LISA 13 | 14 | .. note:: 15 | 16 | This module is not imported by default. You need to import it to use it. 17 | 18 | API 19 | === 20 | 21 | .. automodule:: theano.tensor.nlinalg 22 | :members: 23 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/slinalg.txt: -------------------------------------------------------------------------------- 1 | .. 
../../../../theano/sandbox/slinalg.py 2 | 3 | .. _libdoc_slinalg: 4 | 5 | =================================================================== 6 | :mod:`tensor.slinalg` -- Linear Algebra Ops Using Scipy 7 | =================================================================== 8 | 9 | .. module:: tensor.slinalg 10 | :platform: Unix, Windows 11 | :synopsis: Linear Algebra Ops Using Scipy 12 | .. moduleauthor:: LISA 13 | 14 | .. note:: 15 | 16 | This module is not imported by default. You need to import it to use it. 17 | 18 | API 19 | === 20 | 21 | .. automodule:: theano.tensor.slinalg 22 | :members: 23 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/signal/index.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_tensor_signal: 2 | 3 | ===================================================== 4 | :mod:`signal` -- Signal Processing 5 | ===================================================== 6 | 7 | Signal Processing 8 | ----------------- 9 | 10 | .. module:: signal 11 | :platform: Unix, Windows 12 | :synopsis: various ops for performing basic signal processing 13 | (convolutions, subsampling, fft, etc.) 14 | .. moduleauthor:: LISA 15 | 16 | The signal subpackage contains ops which are useful for performing various 17 | forms of signal processing. 18 | 19 | .. 
toctree:: 20 | :maxdepth: 1 21 | 22 | conv 23 | downsample 24 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/misc/check_blas_many.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python misc/check_blas.py --print_only 4 | 5 | cat /proc/cpuinfo |grep "model name" |uniq 6 | cat /proc/cpuinfo |grep processor 7 | free 8 | uname -a 9 | 10 | TIME_PREFIX=time 11 | VAR=OMP_NUM_THREADS 12 | echo "numpy gemm take=" 13 | THEANO_FLAGS=blas.ldflags= $TIME_PREFIX python misc/check_blas.py --quiet 14 | for i in 1 2 4 8 15 | do 16 | export $VAR=$i 17 | x=`$TIME_PREFIX python misc/check_blas.py --quiet` 18 | echo "theano gemm with $VAR=$i took: ${x}s" 19 | done 20 | 21 | #Fred to test distro numpy at LISA: PYTHONPATH=/u/bastienf/repos:/usr/lib64/python2.5/site-packages THEANO_FLAGS=blas.ldflags= OMP_NUM_THREADS=8 time python misc/check_blas.py 22 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/bin/theano-test: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """IPython Test Suite Runner. 4 | """ 5 | from __future__ import print_function 6 | 7 | # The tests can't even run if nose isn't available, so might as well give the 8 | # user a civilized error message in that case. 9 | 10 | try: 11 | import nose 12 | except ImportError: 13 | error = """\ 14 | ERROR: The IPython test suite requires nose to run. 15 | 16 | Please install nose on your system first and try again. 
17 | For information on installing nose, see: 18 | http://nose.readthedocs.org/en/latest/ 19 | 20 | Exiting.""" 21 | import sys 22 | print(error, file=sys.stderr) 23 | else: 24 | import theano 25 | theano.test() 26 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/cuda/var.txt: -------------------------------------------------------------------------------- 1 | .. ../../../../theano/sandbox/cuda/type.py 2 | .. ../../../../theano/sandbox/cuda/var.py 3 | .. ../../../../theano/sandbox/cuda/ 4 | 5 | .. _libdoc_cuda_var: 6 | 7 | =================================================================== 8 | :mod:`sandbox.cuda.var` -- The Variables for Cuda-allocated arrays 9 | =================================================================== 10 | 11 | .. module:: sandbox.cuda.var 12 | :platform: Unix, Windows 13 | :synopsis: The Variables object for CUDA-allocated arrays 14 | .. moduleauthor:: LISA 15 | 16 | API 17 | === 18 | 19 | .. autoclass:: theano.sandbox.cuda.var.CudaNdarraySharedVariable 20 | :members: get_value, set_value 21 | 22 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sparse/sandbox.txt: -------------------------------------------------------------------------------- 1 | .. ../../../../theano/sparse/sandbox/sp.py 2 | .. ../../../../theano/sparse/sandbox/truedot.py 3 | 4 | .. _libdoc_sparse_sandbox: 5 | 6 | =================================================================== 7 | :mod:`sparse.sandbox` -- Sparse Op Sandbox 8 | =================================================================== 9 | 10 | .. module:: sparse.sandbox 11 | :platform: Unix, Windows 12 | :synopsis: Sparse Op Sandbox 13 | .. moduleauthor:: LISA 14 | 15 | API 16 | === 17 | 18 | .. automodule:: theano.sparse.sandbox.sp 19 | :members: 20 | .. automodule:: theano.sparse.sandbox.sp2 21 | :members: 22 | .. 
automodule:: theano.sparse.sandbox.truedot 23 | :members: 24 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/blocksparse.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import warnings 3 | from theano.tensor.nnet.blocksparse import ( 4 | SparseBlockGemv, SparseBlockOuter, sparse_block_dot, sparse_block_gemv, 5 | sparse_block_gemv_inplace, sparse_block_outer, sparse_block_outer_inplace) 6 | 7 | __all__ = [SparseBlockGemv, SparseBlockOuter, sparse_block_dot, 8 | sparse_block_gemv, sparse_block_gemv_inplace, sparse_block_outer, 9 | sparse_block_outer_inplace] 10 | 11 | warnings.warn("DEPRECATION: theano.sandbox.blocksparse does not exist anymore," 12 | "it has been moved to theano.tensor.nnet.blocksparse.", 13 | category=DeprecationWarning) 14 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/nnet/index.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_tensor_nnet: 2 | 3 | ================================================== 4 | :mod:`nnet` -- Ops related to neural networks 5 | ================================================== 6 | 7 | .. module:: nnet 8 | :platform: Unix, Windows 9 | :synopsis: various ops relating to neural networks 10 | .. moduleauthor:: LISA 11 | 12 | Theano was originally developped for machine learning applications, particularly 13 | for the topic of deep learning. As such, our lab has developed many functions 14 | and ops which are particular to neural networks and deep learning. 15 | 16 | .. 
toctree:: 17 | :maxdepth: 1 18 | 19 | conv 20 | nnet 21 | neighbours 22 | bn 23 | blocksparse 24 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/nnet/neighbours.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_tensor_nnet_neighbours: 2 | 3 | ======================================================================= 4 | :mod:`neighbours` -- Ops for working with images in convolutional nets 5 | ======================================================================= 6 | 7 | .. module:: theano.tensor.nnet.neighbours 8 | :platform: Unix, Windows 9 | :synopsis: Ops for working with images in conv nets 10 | .. moduleauthor:: LISA 11 | 12 | 13 | Functions 14 | ========= 15 | 16 | .. autofunction:: theano.tensor.nnet.neighbours.images2neibs 17 | 18 | .. autofunction:: theano.tensor.nnet.neighbours.neibs2images 19 | 20 | 21 | See also 22 | ======== 23 | 24 | - :ref:`indexing` 25 | - :ref:`lib_scan` 26 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/python.txt: -------------------------------------------------------------------------------- 1 | .. _python: 2 | 3 | 4 | *************** 5 | Python tutorial 6 | *************** 7 | 8 | In this documentation, we suppose that the reader knows Python. Here is a small list of Python 9 | tutorials/exercises if you need to learn it or only need a refresher: 10 | 11 | * `Python Challenge `__ 12 | * `Dive into Python `__ 13 | * `Google Python Class `__ 14 | * `Enthought Python course `__ (free for academics) 15 | 16 | We have a tutorial on how :ref:`Python manages its memory `. 
17 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/signal/downsample.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_tensor_signal_downsample: 2 | 3 | ====================================================== 4 | :mod:`downsample` -- Down-Sampling 5 | ====================================================== 6 | 7 | .. module:: downsample 8 | :platform: Unix, Windows 9 | :synopsis: ops for performing various forms of downsampling 10 | .. moduleauthor:: LISA 11 | 12 | .. seealso:: :func:`theano.tensor.nnet.neighbours.images2neibs` 13 | 14 | .. autofunction:: theano.tensor.signal.downsample.max_pool_2d 15 | .. autofunction:: theano.tensor.signal.downsample.max_pool_2d_same_size 16 | 17 | .. function:: fft(*todo) 18 | 19 | [James has some code for this, but hasn't gotten it into the source tree yet.] 20 | 21 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/proposals/dp_optimization.txt: -------------------------------------------------------------------------------- 1 | 2 | ======================== 3 | DP Instruction Selection 4 | ======================== 5 | 6 | Read Ch 9 of Modern Compiler Implementation about instruction selection. 7 | We should probably be doing graph optimization totally differently: 8 | Optimizations *only add* new ways of implementing something, they do not replace 9 | the old way. Every graph node (apply) as a cost, and Dynamic Programming (DP) 10 | is used to select the minimum cost graph. 11 | 12 | The advantage of this approach is that optimizations do not have to run in such 13 | a careful order, and graph selection would be much faster. 14 | 15 | 16 | Think about how aliasing and destructive operations (the destroy-handler) would 17 | fit in this approach. 
18 | 19 | -------------------------------------------------------------------------------- /software/enas/src/cifar10/sample_structures12.txt: -------------------------------------------------------------------------------- 1 | [[2], [1, 1], [0, 0, 0], [5, 0, 1, 1], [4, 0, 1, 0, 0], [3, 1, 0, 0, 0, 1], [1, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0, 0, 1], [5, 1, 1, 0, 1, 1, 1, 1, 0], [4, 1, 0, 0, 1, 1, 0, 1, 1, 0], [4, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0], [3, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0]] 2 | [[2], [4, 0], [3, 0, 1], [4, 0, 1, 1], [3, 0, 1, 0, 0], [4, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1], [3, 1, 1, 1, 0, 0, 1, 1], [2, 1, 0, 0, 0, 0, 0, 1, 1], [2, 1, 1, 1, 1, 0, 1, 1, 0, 1], [1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1], [5, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1]] 3 | [[2], [2, 0], [0, 0, 0], [5, 1, 0, 0], [5, 1, 0, 1, 1], [2, 1, 0, 1, 1, 1], [4, 1, 1, 0, 1, 1, 1], [0, 0, 0, 1, 0, 1, 0, 0], [2, 1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1], [4, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0]] 4 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/pycuda_simple.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import pycuda.autoinit 3 | import pycuda.driver as drv 4 | import numpy 5 | 6 | from pycuda.compiler import SourceModule 7 | mod = SourceModule(""" 8 | __global__ void multiply_them(float *dest, float *a, float *b) 9 | { 10 | const int i = threadIdx.x; 11 | dest[i] = a[i] * b[i]; 12 | } 13 | """) 14 | 15 | multiply_them = mod.get_function("multiply_them") 16 | 17 | a = numpy.random.randn(400).astype(numpy.float32) 18 | b = numpy.random.randn(400).astype(numpy.float32) 19 | 20 | dest = numpy.zeros_like(a) 21 | multiply_them( 22 | drv.Out(dest), drv.In(a), drv.In(b), 23 | block=(400,1,1), grid=(1,1)) 24 | 25 | assert numpy.allclose(dest, a*b) 26 | print(dest) 27 | 
-------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/Theano.sln: -------------------------------------------------------------------------------- 1 | 2 | Microsoft Visual Studio Solution File, Format Version 11.00 3 | # Visual Studio 2010 4 | Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "Theano", "Theano.pyproj", "{B67D762D-0020-4E02-9DDF-7DB4F89B1DD3}" 5 | EndProject 6 | Global 7 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 8 | Debug|Any CPU = Debug|Any CPU 9 | Release|Any CPU = Release|Any CPU 10 | EndGlobalSection 11 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 12 | {B67D762D-0020-4E02-9DDF-7DB4F89B1DD3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU 13 | {B67D762D-0020-4E02-9DDF-7DB4F89B1DD3}.Release|Any CPU.ActiveCfg = Release|Any CPU 14 | EndGlobalSection 15 | GlobalSection(SolutionProperties) = preSolution 16 | HideSolutionNode = FALSE 17 | EndGlobalSection 18 | EndGlobal 19 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/misc/latence_gpu_transfert.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | 3 | import time 4 | 5 | import numpy 6 | 7 | import theano 8 | 9 | y = theano.tensor.fvector() 10 | x = theano.shared(numpy.zeros(1, dtype='float32')) 11 | f1 = theano.function([y], updates={x: y}) 12 | f2 = theano.function([], theano.sandbox.cuda.host_from_gpu(x)) 13 | print(f1.maker.fgraph.toposort()) 14 | print(f2.maker.fgraph.toposort()) 15 | for i in [1, 10, 100, 1000, 10000, 100000, 1000000, 10000000]: 16 | o = numpy.zeros(i, dtype='float32') 17 | t0 = time.time() 18 | f1(o) 19 | t1 = time.time() 20 | tf1 = t1 - t0 21 | t0 = time.time() 22 | f2() 23 | t1 = time.time() 24 | 25 | print("%8i %6.1f ns %7.1f ns" % (i, tf1 * 1e6, (t1 - t0) * 1e6)) 26 | 
-------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sparse/sandbox/truedot.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import unittest 3 | 4 | import theano 5 | import numpy 6 | import scipy.sparse as sp 7 | 8 | from theano import sparse 9 | from theano import gof, tensor, compile 10 | 11 | from theano.sparse.tests.test_basic import eval_outputs 12 | from theano.sparse.basic import ( 13 | _is_sparse_variable, _is_dense_variable, 14 | as_sparse_variable, _is_sparse, _mtypes, _mtype_to_str) 15 | from theano.sparse import SparseType, dense_from_sparse, transpose 16 | 17 | from theano.sparse.tests.test_basic import sparse_random_inputs 18 | from theano.tests import unittest_tools as utt 19 | from theano.sparse import verify_grad_sparse 20 | 21 | # To maintain compatibility 22 | from theano.sparse.basic import TrueDot, true_dot 23 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/index.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_tensor: 2 | 3 | ================================================== 4 | :mod:`tensor` -- Types and Ops for Symbolic numpy 5 | ================================================== 6 | 7 | .. module:: tensor 8 | :platform: Unix, Windows 9 | :synopsis: symbolic types and operations for n-dimensional arrays. 10 | .. moduleauthor:: LISA 11 | 12 | Theano's strength is in expressing symbolic calculations involving tensors. 13 | There are many types of symbolic expressions for tensors. 14 | They are grouped into the following sections: 15 | 16 | 17 | .. 
import numpy as np
import os


class Eval_BN(object):
    """Score an 8x8 Bayesian-network adjacency matrix via an external R script.

    The matrix is written to a temp file under `save_dir` and handed to
    `R_script` (run with Rscript), whose single-number output is read back.
    """

    def __init__(self, save_dir, R_script='compute_score.R'):
        self.save_dir = save_dir    # directory for temp matrix/score files
        self.R_script = R_script    # path to the R scoring script

    def eval(self, input_string):
        """Parse a whitespace-separated string of 64 ints into an 8x8
        adjacency matrix, dump it to a temp file and return its score."""
        input_matrix = np.array([int(x) for x in input_string.split()]).reshape(8, 8)
        tmp_file = os.path.join(self.save_dir, 'temp_BN_matrix')
        np.savetxt(tmp_file, input_matrix)
        return self.compute_score(tmp_file)

    def compute_score(self, input_file, output_file=None):
        """Run the R script on `input_file`; return the score it writes.

        Bug fix: the shell command previously hard-coded 'compute_score.R',
        silently ignoring the `R_script` passed to __init__.
        """
        if output_file is None:
            output_file = input_file + "_score"
        # NOTE(review): paths are interpolated into a shell command via
        # os.system; keep them shell-safe (no spaces/metacharacters).
        cmd = 'Rscript %s %s %s' % (self.R_script, input_file, output_file)
        os.system(cmd)
        # ndmin=1 so a scalar file still yields an indexable array.
        score = np.loadtxt(output_file, ndmin=1)
        return float(score[0])
15 | 16 | 17 | With a little work, Theano could also implement more sophisticated 18 | optimizations: 19 | 20 | * automatic ordering of matrix multiplications 21 | * profile-based memory layout decisions (e.g. row-major vs. col-major) 22 | * gcc intrinsics to use MMX, SSE2 parallelism for faster element-wise arithmetic 23 | * conditional expressions 24 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tests/disturb_mem.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from datetime import datetime 3 | from six.moves import xrange 4 | 5 | __authors__ = "Ian Goodfellow" 6 | __credits__ = ["Ian Goodfellow"] 7 | __license__ = "3-clause BSD" 8 | __maintainer__ = "Ian Goodfellow" 9 | __email__ = "goodfeli@iro" 10 | 11 | 12 | def disturb_mem(): 13 | # Allocate a time-dependent amount of objects to increase 14 | # chances of subsequently objects' ids changing from run 15 | # to run. This is useful for exposing issues that cause 16 | # non-deterministic behavior due to dependence on memory 17 | # addresses, like iterating over a dict or a set. 
18 | global l 19 | now = datetime.now() 20 | ms = now.microsecond 21 | ms = int(ms) 22 | n = ms % 1000 23 | m = ms // 1000 24 | l = [[0] * m for i in xrange(n)] 25 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/double_op.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import numpy 3 | import theano 4 | 5 | class DoubleOp(theano.Op): 6 | def __eq__(self, other): 7 | return type(self) == type(other) 8 | def __hash__(self): 9 | return hash(type(self)) 10 | def __str__(self): 11 | return self.__class__.__name__ 12 | def make_node(self, x): 13 | x = theano.tensor.as_tensor_variable(x) 14 | return theano.Apply(self, [x], [x.type()]) 15 | def perform(self, node, inputs, output_storage): 16 | x = inputs[0] 17 | z = output_storage[0] 18 | z[0] = x * 2 19 | 20 | x = theano.tensor.matrix() 21 | 22 | f = theano.function([x], DoubleOp()(x)) 23 | 24 | inp = numpy.random.rand(5,5) 25 | out = f(inp) 26 | assert numpy.allclose(inp*2, out) 27 | print(inp) 28 | print(out) 29 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/io.txt: -------------------------------------------------------------------------------- 1 | =================================================================== 2 | :mod:`tensor.io` -- Tensor IO Ops 3 | =================================================================== 4 | 5 | .. module:: tensor.io 6 | :platform: Unix, Windows 7 | :synopsis: Tensor IO Ops 8 | .. moduleauthor:: LISA 9 | 10 | File operation 11 | ============== 12 | 13 | - Load from disk with the function :func:`load ` and its associated op :class:`LoadFromDisk ` 14 | 15 | MPI operation 16 | ============= 17 | - Non-blocking transfer: :func:`isend ` and :func:`irecv `. 
18 | - Blocking transfer: :func:`send ` and :func:`recv ` 19 | 20 | Details 21 | ======= 22 | 23 | .. automodule:: theano.tensor.io 24 | :members: 25 | 26 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/cuda/tests/test_viewop.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import numpy 3 | import unittest 4 | from nose.plugins.skip import SkipTest 5 | 6 | import theano 7 | 8 | mode_with_opt = theano.compile.mode.get_default_mode() 9 | mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu') 10 | 11 | 12 | def test_viewop_gpu(): 13 | from theano.sandbox import cuda 14 | if cuda.cuda_available == False: 15 | raise SkipTest('Optional package cuda disabled') 16 | _x = theano.tensor.fvector('x') 17 | x = cuda.gpu_from_host(_x) 18 | _out = theano.compile.ViewOp()(x) 19 | out = cuda.host_from_gpu(_out) 20 | f = theano.function([x], 21 | out, 22 | mode=mode_with_gpu) 23 | data = numpy.array([1, 2, 3], dtype='float32') 24 | assert numpy.allclose(f(data), data) 25 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/typed_list/opt.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from theano import gof 3 | from theano import compile 4 | from theano.gof import TopoOptimizer 5 | from theano.typed_list.basic import Reverse, Append, Extend, Insert, Remove 6 | 7 | 8 | @gof.local_optimizer([Append, Extend, Insert, Reverse, Remove], inplace=True) 9 | def typed_list_inplace_opt(node): 10 | if (isinstance(node.op, (Append, Extend, Insert, Reverse, Remove)) and not 11 | node.op.inplace): 12 | 13 | new_op = node.op.__class__(inplace=True) 14 | new_node = new_op(*node.inputs) 15 | return [new_node] 
16 | return False 17 | compile.optdb.register('typed_list_inplace_opt', 18 | TopoOptimizer(typed_list_inplace_opt, 19 | failure_callback=TopoOptimizer.warn_inplace), 20 | 60, 'fast_run', 'inplace') 21 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/nice_quotes.txt: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | "Thank YOU for correcting it so quickly. I wish all packages I worked 4 | with would have such an active maintenance - this is as good as it 5 | gets :-)" 6 | -- Jan Antolik, [theano-users] strange behaviour, Mon, Aug 2, 2010 at 1:36 PM 7 | 8 | ------------------------- 9 | 10 | "Theano rocks incredibly. It's like the holy grail of linear algebra 11 | computations." 12 | 13 | -- visionlessvisionary on reddit 14 | 15 | http://www.reddit.com/r/MachineLearning/comments/banhb/deep_learning_tutorial_learn_to_build_complex/c0lsvik 16 | 17 | ------------------------- 18 | 19 | I am completely new to theano and after running the deep-learning 20 | tutorial and see the examples actually work on my GTX 275 I must say I 21 | am 100% sold on the theano approach; this is an amazing project that 22 | deserves broad recognition among the scientific python community. 
23 | 24 | -- Olivier Grisel 25 | 26 | 27 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/crei2013/scan_pow.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import theano 3 | import theano.tensor as tt 4 | from six.moves import xrange 5 | 6 | k = tt.iscalar("k") 7 | A = tt.vector("A") 8 | 9 | 10 | def inner_fct(prior_result, A): 11 | return prior_result * A 12 | # Symbolic description of the result 13 | result, updates = theano.scan(fn=inner_fct, 14 | outputs_info=tt.ones_like(A), 15 | non_sequences=A, n_steps=k) 16 | 17 | # Scan has provided us with A**1 through A**k. Keep only the last 18 | # value. Scan notices this and does not waste memory saving them. 19 | final_result = result[-1] 20 | 21 | power = theano.function(inputs=[A, k], 22 | outputs=final_result, 23 | updates=updates) 24 | 25 | print(power(list(range(10)), 2)) 26 | #[ 0. 1. 4. 9. 16. 25. 36. 49. 64. 81.] 27 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/cuda/tests/test_neighbours.py: -------------------------------------------------------------------------------- 1 | # Skip test if cuda_ndarray is not available. 
2 | from __future__ import absolute_import, print_function, division 3 | from nose.plugins.skip import SkipTest 4 | 5 | import theano.sandbox.cuda as cuda_ndarray 6 | if cuda_ndarray.cuda_available == False: 7 | raise SkipTest('Optional package cuda disabled') 8 | 9 | import theano.tensor.nnet.tests.test_neighbours 10 | from theano.sandbox.cuda.neighbours import GpuImages2Neibs 11 | 12 | if theano.config.mode == 'FAST_COMPILE': 13 | mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu') 14 | else: 15 | mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu') 16 | 17 | 18 | class T_GpuImages2Neibs(theano.tensor.nnet.tests.test_neighbours.T_Images2Neibs): 19 | mode = mode_with_gpu 20 | op = GpuImages2Neibs 21 | dtypes = ['float32'] 22 | 23 | if __name__ == '__main__': 24 | unittest.main() 25 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sparse/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import sys 3 | 4 | try: 5 | import scipy 6 | scipy_ver = [int(n) for n in scipy.__version__.split('.')[:2]] 7 | enable_sparse = bool(scipy_ver >= [0, 7]) 8 | if not enable_sparse: 9 | sys.stderr.write("WARNING: scipy version = %s." 10 | " We request version >=0.7.0 for the sparse code as it has" 11 | " bugs fixed in the sparse matrix code.\n" % scipy.__version__) 12 | except ImportError: 13 | enable_sparse = False 14 | sys.stderr.write("WARNING: scipy can't be imported." 
15 | " We disable the sparse matrix code.") 16 | 17 | from theano.sparse.type import * 18 | 19 | if enable_sparse: 20 | from theano.sparse.basic import * 21 | from theano.sparse import opt 22 | from theano.sparse import sharedvar 23 | from theano.sparse.sharedvar import sparse_constructor as shared 24 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/gof/toolbox.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_gof_toolbox: 2 | 3 | ================================================ 4 | :mod:`toolbox` -- [doc TODO] 5 | ================================================ 6 | 7 | .. module:: theano.gof.toolbox 8 | :platform: Unix, Windows 9 | :synopsis: Theano Internals 10 | .. moduleauthor:: LISA 11 | 12 | Guide 13 | ===== 14 | 15 | .. class:: Bookkeeper(object) 16 | 17 | .. class:: History(object) 18 | 19 | .. method:: revert(fgraph, checkpoint) 20 | Reverts the graph to whatever it was at the provided 21 | checkpoint (undoes all replacements). A checkpoint at any 22 | given time can be obtained using self.checkpoint(). 23 | 24 | .. class:: Validator(object) 25 | 26 | .. class:: ReplaceValidate(History, Validator) 27 | 28 | .. method:: replace_validate(fgraph, var, new_var, reason=None) 29 | 30 | .. class:: NodeFinder(Bookkeeper) 31 | 32 | .. 
class:: PrintListener(object) 33 | 34 | 35 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/benchmark/convolution/scipy_conv.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import sys, timeit 3 | try: 4 | img_shape = int(sys.argv[1]), int(sys.argv[2]) 5 | ker_shape = int(sys.argv[3]), int(sys.argv[4]) 6 | dtype = sys.argv[5] 7 | except: 8 | print("Usage: %s [nb_call]" % sys.argv[0], file=sys.stderr) 9 | sys.exit(-1) 10 | 11 | nb_call = 1 12 | if len(sys.argv)>6: 13 | nb_call=int(sys.argv[6]) 14 | 15 | T = timeit.Timer("f()",""" 16 | from scipy.signal import convolve2d 17 | import numpy 18 | 19 | img_shape = int(sys.argv[1]), int(sys.argv[2]) 20 | ker_shape = int(sys.argv[3]), int(sys.argv[4]) 21 | dtype = sys.argv[5] 22 | 23 | img = numpy.ones(img_shape, dtype=dtype) 24 | ker = numpy.ones(ker_shape, dtype=dtype) 25 | 26 | def f(): 27 | convolve2d(img, ker, mode="valid") 28 | """) 29 | time = T.repeat(repeat=3, number=nb_call) 30 | print(min(time), "scipy") 31 | 32 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/gof/fgraph.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _libdoc_gof_fgraph: 3 | 4 | ================================================ 5 | :mod:`fgraph` -- Graph Container [doc TODO] 6 | ================================================ 7 | 8 | .. module:: fgraph 9 | :platform: Unix, Windows 10 | :synopsis: Theano Internals 11 | .. moduleauthor:: LISA 12 | 13 | 14 | Guide 15 | ===== 16 | 17 | FunctionGraph 18 | ------------- 19 | 20 | .. _libdoc_gof_fgraphfeature: 21 | 22 | FunctionGraph Features 23 | ---------------------- 24 | 25 | .. 
from __future__ import absolute_import, print_function, division
from theano.gof.utils import hash_from_code


def hash_from_sparse(data):
    """Return a strong hash of a scipy sparse matrix.

    hash_from_code only covers the raw data buffer, so shape, dtype and
    storage format are folded in explicitly: zeros of shape (1, 0) vs
    (2, 0), or int32 vs uint32 zeros, must not collide.
    """
    # Hash each component separately (md5-based, since Python's hash is
    # weak), then hash the concatenation once more so the final digest
    # stays short.
    h_buffer = hash_from_code(data.data)
    h_indices = hash_from_code(data.indices)
    h_indptr = hash_from_code(data.indptr)
    h_shape = hash_from_code(str(data.shape))
    h_dtype = hash_from_code(str(data.dtype))
    h_format = hash_from_code(data.format)
    combined = (h_buffer + h_indices + h_indptr +
                h_shape + h_dtype + h_format)
    return hash_from_code(combined)
12 | 13 | This can be used to encapsulate some functionality in one block. It is 14 | useful to scale Theano compilation for regular bigger graphs when we 15 | reuse that encapsulated fonctionality with different inputs many 16 | times. Due to this encapsulation, it can make Theano compilation phase 17 | faster for graphs with many nodes. 18 | 19 | Using this for small graphs is not recommended as it disables 20 | optimizations between what is inside the encapsulation and outside of it. 21 | 22 | .. note: 23 | 24 | This was not used widely up to now. If you have any 25 | questions/comments do not hesitate to contact us on the mailing list. 26 | 27 | 28 | 29 | .. autoclass:: theano.compile.builders.OpFromGraph 30 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/tensor/signal/conv.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_tensor_signal_conv: 2 | 3 | ====================================================== 4 | :mod:`conv` -- Convolution 5 | ====================================================== 6 | 7 | .. note:: 8 | 9 | Two similar implementation exists for conv2d: 10 | 11 | :func:`signal.conv2d ` and 12 | :func:`nnet.conv2d `. 13 | 14 | The former implements a traditional 15 | 2D convolution, while the latter implements the convolutional layers 16 | present in convolutional neural networks (where filters are 3D and pool 17 | over several input channels). 18 | 19 | .. module:: conv 20 | :platform: Unix, Windows 21 | :synopsis: ops for performing convolutions 22 | .. moduleauthor:: LISA 23 | 24 | .. autofunction:: theano.tensor.signal.conv.conv2d 25 | 26 | .. function:: fft(*todo) 27 | 28 | [James has some code for this, but hasn't gotten it into the source tree yet.] 
29 | 30 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/benchmark/convolution/opencv.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import sys, timeit 3 | import numpy 4 | import scikits.image.opencv 5 | 6 | try: 7 | img_shape = int(sys.argv[1]), int(sys.argv[2]) 8 | ker_shape = int(sys.argv[3]), int(sys.argv[4]) 9 | dtype = sys.argv[5] 10 | except: 11 | print("Usage: %s [nb_call]" % sys.argv[0], file=sys.stderr) 12 | sys.exit(-1) 13 | 14 | nb_call = 1 15 | if len(sys.argv)>6: 16 | nb_call=int(sys.argv[6]) 17 | 18 | T = timeit.Timer("f()",""" 19 | import scikits.image.opencv, sys, numpy 20 | img_shape = int(sys.argv[1]), int(sys.argv[2]) 21 | ker_shape = int(sys.argv[3]), int(sys.argv[4]) 22 | dtype = sys.argv[5] 23 | 24 | img = numpy.ones(img_shape, dtype=dtype) 25 | ker = numpy.ones(ker_shape, dtype=dtype) 26 | 27 | def f(): 28 | scikits.image.opencv.cvFilter2D(img, ker) 29 | """) 30 | time = T.repeat(repeat=3, number=nb_call) 31 | print(min(time), "opencv") 32 | 33 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/crei2013/scan_poly.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import numpy 3 | 4 | import theano 5 | import theano.tensor as tt 6 | 7 | coefficients = theano.tensor.vector("coefficients") 8 | x = tt.scalar("x") 9 | max_coefficients_supported = 10000 10 | 11 | # Generate the components of the polynomial 12 | full_range = theano.tensor.arange(max_coefficients_supported) 13 | components, updates = theano.scan(fn=lambda coeff, power, free_var: 14 | coeff * (free_var ** power), 15 | outputs_info=None, 16 | sequences=[coefficients, full_range], 17 | non_sequences=x) 18 | polynomial = 
components.sum() 19 | calculate_polynomial = theano.function(inputs=[coefficients, x], 20 | outputs=polynomial) 21 | 22 | test_coeff = numpy.asarray([1, 0, 2], dtype=numpy.float32) 23 | print(calculate_polynomial(test_coeff, 3)) 24 | # 19.0 25 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/hpcs2011_tutorial/scan_poly.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import numpy 3 | 4 | import theano 5 | import theano.tensor as T 6 | 7 | coefficients = theano.tensor.vector("coefficients") 8 | x = T.scalar("x"); max_coefficients_supported = 10000 9 | 10 | # Generate the components of the polynomial 11 | full_range=theano.tensor.arange(max_coefficients_supported) 12 | components, updates = theano.scan(fn=lambda coeff, power, free_var: 13 | coeff * (free_var ** power), 14 | outputs_info=None, 15 | sequences=[coefficients, full_range], 16 | non_sequences=x) 17 | polynomial = components.sum() 18 | calculate_polynomial = theano.function(inputs=[coefficients, x], 19 | outputs=polynomial) 20 | 21 | test_coeff = numpy.asarray([1, 0, 2], dtype=numpy.float32) 22 | print(calculate_polynomial(test_coeff, 3)) 23 | # 19.0 24 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/gpuarray/tests/config.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from nose.plugins.skip import SkipTest 3 | 4 | import theano.sandbox.gpuarray 5 | 6 | if theano.sandbox.gpuarray.pygpu is None: 7 | raise SkipTest("pygpu not installed") 8 | 9 | if (not theano.sandbox.gpuarray.pygpu_activated and 10 | not theano.config.init_gpu_device.startswith('gpu')): 11 | theano.sandbox.gpuarray.init_dev('cuda') 12 | 13 | if not 
theano.sandbox.gpuarray.pygpu_activated: 14 | raise SkipTest("pygpu disabled") 15 | 16 | test_ctx_name = None 17 | 18 | if theano.config.mode == 'FAST_COMPILE': 19 | mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpuarray').excluding('gpu') 20 | mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpuarray') 21 | else: 22 | mode_with_gpu = theano.compile.mode.get_default_mode().including('gpuarray').excluding('gpu') 23 | mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpuarray') 24 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/gof/tests/test_compiledir.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from theano.configdefaults import short_platform 3 | 4 | 5 | def test_short_platform(): 6 | for r, p, a in [ # (release, platform, answer) 7 | ('3.2.0-70-generic', 8 | 'Linux-3.2.0-70-generic-x86_64-with-debian-wheezy-sid', 9 | "Linux-3.2--generic-x86_64-with-debian-wheezy-sid"), 10 | ('3.2.0-70.1-generic', 11 | 'Linux-3.2.0-70.1-generic-x86_64-with-debian-wheezy-sid', 12 | "Linux-3.2--generic-x86_64-with-debian-wheezy-sid"), 13 | ('3.2.0-70.1.2-generic', 14 | 'Linux-3.2.0-70.1.2-generic-x86_64-with-debian-wheezy-sid', 15 | "Linux-3.2--generic-x86_64-with-debian-wheezy-sid"), 16 | ('2.6.35.14-106.fc14.x86_64', 17 | 'Linux-2.6.35.14-106.fc14.x86_64-x86_64-with-fedora-14-Laughlin', 18 | 'Linux-2.6-fc14.x86_64-x86_64-with-fedora-14-Laughlin'), 19 | ]: 20 | o = short_platform(r, p) 21 | assert o == a, (o, a) 22 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/cuda/op.txt: -------------------------------------------------------------------------------- 1 | 2 | .. 
_libdoc_cuda_op: 3 | 4 | ====================================================== 5 | :mod:`sandbox.cuda` -- List of CUDA GPU Op implemented 6 | ====================================================== 7 | 8 | .. moduleauthor:: LISA 9 | 10 | Normally you should not call directly those Ops! Theano should automatically transform cpu ops to their gpu equivalent. So this list is just useful to let people know what is implemented on the gpu. 11 | 12 | Basic Op 13 | ======== 14 | 15 | .. automodule:: theano.sandbox.cuda.basic_ops 16 | :members: 17 | 18 | Blas Op 19 | ======= 20 | 21 | .. automodule:: theano.sandbox.cuda.blas 22 | :members: 23 | .. autoclass:: theano.sandbox.cuda.blas.GpuBatchedDot 24 | 25 | Nnet Op 26 | ======= 27 | 28 | .. automodule:: theano.sandbox.cuda.nnet 29 | :members: 30 | 31 | Curand Op 32 | ========= 33 | 34 | Random generator based on the CURAND libraries. It is not inserted automatically. 35 | 36 | .. automodule:: theano.sandbox.cuda.rng_curand 37 | :members: 38 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/misc/buildbot_filter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import, print_function, division 3 | import sys 4 | 5 | 6 | def filter_output(fd_in): 7 | s = "" 8 | for line in fd_in: 9 | toks = line.split() 10 | if len(toks): 11 | if toks[0] == "File" and toks[-1].startswith('test'): 12 | s += line 13 | elif toks[0].startswith("ImportError"): 14 | s += line 15 | elif toks[0] in ["KnownFailureTest:", "Exception:", "Failure:", 16 | "AssertionError", "AssertionError:", 17 | "GradientError:"]: 18 | s += line 19 | elif toks[0] == "Executing" and toks[1] in ["tests", 'nosetests']: 20 | s += line 21 | return s 22 | 23 | if __name__ == "__main__": 24 | import pdb 25 | pdb.set_trace() 26 | if len(sys.argv) > 1: 27 | with open(sys.argv[1]) as f: 28 | 
print(filter_output(f)) 29 | else: 30 | print(filter_output(sys.stdin)) 31 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/sandbox/software.txt: -------------------------------------------------------------------------------- 1 | =============== 2 | Others software 3 | =============== 4 | 5 | Other software to look at and maybe recommend to users: 6 | 7 | * [http://www.pytables.org/moin PyTables] - This is looking really 8 | promising for dataset storage and experiment logging... This might 9 | actually be useful for large data sets. 10 | * [http://matplotlib.sourceforge.net/ MatPlotLib] - visualization tools 11 | (plot curves interactively, like matlab's figure window) 12 | * [http://www.pythonware.com/products/pil/ PIL] - Python Image Library: 13 | write your matrices out in png! (Kinda a weird recommendation, I think) 14 | * [http://www.logilab.org/857 pylint] - Syntax checker for python to 15 | help beautify your code. (We'd be hypocrites to recommend this :) 16 | * [http://www.winpdb.org/ Winpdb] - A Platform Independent Python 17 | Debugger. 
(Except it doesn't really help you debug Theano graphs) 18 | * [http://wiki.python.org/moin/IntegratedDevelopmentEnvironments Python 19 | Integrated Development Environments] - for all your coding needs 20 | 21 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/raise_op.py: -------------------------------------------------------------------------------- 1 | """Symbolic Op for raising an exception.""" 2 | from __future__ import absolute_import, print_function, division 3 | from theano import gof 4 | 5 | __authors__ = "James Bergstra" 6 | __copyright__ = "(c) 2011, Universite de Montreal" 7 | __license__ = "3-clause BSD License" 8 | __contact__ = "theano-dev " 9 | 10 | __docformat__ = "restructuredtext en" 11 | 12 | 13 | class Raise(gof.Op): 14 | """Op whose perform() raises an exception. 15 | """ 16 | __props__ = ('msg', 'exc') 17 | 18 | def __init__(self, msg="", exc=NotImplementedError): 19 | """ 20 | msg - the argument to the exception 21 | exc - an exception class to raise in self.perform 22 | """ 23 | self.msg = msg 24 | self.exc = exc 25 | 26 | def __str__(self): 27 | return "Raise{%s(%s)}" % (self.exc, self.msg) 28 | 29 | def make_node(self, x): 30 | return gof.Apply(self, [x], [x.type()]) 31 | 32 | def perform(self, node, inputs, out_storage): 33 | raise self.exc(self.msg) 34 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/crei2013/ifelse_switch.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import time 3 | 4 | import numpy 5 | 6 | import theano 7 | from theano import tensor as tt 8 | from six.moves import xrange 9 | from theano.ifelse import ifelse 10 | 11 | a, b = tt.scalars('a', 'b') 12 | x, y = tt.matrices('x', 'y') 13 | 14 | z_switch = tt.switch(tt.lt(a, b), tt.mean(x), tt.mean(y)) 15 | z_lazy = 
ifelse(tt.lt(a, b), tt.mean(x), tt.mean(y)) 16 | 17 | f_switch = theano.function([a, b, x, y], z_switch) 18 | f_lazyifelse = theano.function([a, b, x, y], z_lazy) 19 | 20 | val1 = 0. 21 | val2 = 1. 22 | big_mat1 = numpy.ones((10000, 1000)) 23 | big_mat2 = numpy.ones((10000, 1000)) 24 | 25 | n_times = 10 26 | 27 | tic = time.clock() 28 | for i in xrange(n_times): 29 | f_switch(val1, val2, big_mat1, big_mat2) 30 | print('time spent evaluating both values %f sec' % (time.clock() - tic)) 31 | 32 | tic = time.clock() 33 | for i in xrange(n_times): 34 | f_lazyifelse(val1, val2, big_mat1, big_mat2) 35 | print('time spent evaluating one value %f sec' % (time.clock() - tic)) 36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Muhan Zhang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tensor/signal/downsample.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from . import pool 3 | import warnings 4 | 5 | warnings.warn( 6 | "downsample module has been moved to the theano.tensor.signal.pool module.") 7 | max_pool_2d_same_size = pool.max_pool_2d_same_size 8 | max_pool_2d = pool.pool_2d 9 | DownsampleFactorMax = pool.Pool 10 | PoolGrad = pool.PoolGrad 11 | MaxPoolGrad = pool.MaxPoolGrad 12 | AveragePoolGrad = pool.AveragePoolGrad 13 | 14 | 15 | # This is for compatibility with pickled things. It should go away at 16 | # some point. 17 | class DownsampleFactorMaxGrad(object): 18 | def __new__(self, ds, ignore_border, st=None, padding=(0, 0), mode='max'): 19 | if mode == 'max': 20 | return MaxPoolGrad(ds=ds, ignore_border=ignore_border, st=st, 21 | padding=padding) 22 | else: 23 | return AveragePoolGrad(ds=ds, ignore_border=ignore_border, st=st, 24 | padding=padding, mode=mode) 25 | 26 | DownsampleFactorMaxGradGrad = pool.DownsampleFactorMaxGradGrad 27 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/sandbox/gpuarray/op.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_gpuarray_op: 2 | 3 | ================================ 4 | List of gpuarray Ops implemented 5 | ================================ 6 | 7 | .. moduleauthor:: LISA 8 | 9 | Normally you should not call directly those Ops! 
Theano should 10 | automatically transform cpu ops to their gpu equivalent. So this list 11 | is just useful to let people know what is implemented on the gpu. 12 | 13 | Basic Op 14 | ======== 15 | 16 | .. automodule:: theano.sandbox.gpuarray.basic_ops 17 | :members: 18 | 19 | Blas Op 20 | ======= 21 | 22 | .. automodule:: theano.sandbox.gpuarray.blas 23 | :members: 24 | 25 | .. automodule:: theano.sandbox.gpuarray.nerv 26 | :members: 27 | 28 | Elemwise Op 29 | =========== 30 | 31 | .. automodule:: theano.sandbox.gpuarray.elemwise 32 | :members: 33 | 34 | Subtensor Op 35 | ============ 36 | 37 | .. automodule:: theano.sandbox.gpuarray.subtensor 38 | :members: 39 | 40 | Nnet Op 41 | ======= 42 | 43 | .. automodule:: theano.sandbox.gpuarray.nnet 44 | :members: 45 | 46 | .. automodule:: theano.sandbox.gpuarray.neighbours 47 | :members: 48 | -------------------------------------------------------------------------------- /software/enas/scripts/cifar10_micro_search.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="$(pwd)" 4 | 5 | python src/cifar10/main.py \ 6 | --data_format="NCHW" \ 7 | --search_for="micro" \ 8 | --reset_output_dir \ 9 | --data_path="data/cifar10" \ 10 | --output_dir="outputs" \ 11 | --batch_size=160 \ 12 | --num_epochs=150 \ 13 | --log_every=50 \ 14 | --eval_every_epochs=1 \ 15 | --child_use_aux_heads \ 16 | --child_num_layers=6 \ 17 | --child_out_filters=20 \ 18 | --child_l2_reg=1e-4 \ 19 | --child_num_branches=5 \ 20 | --child_num_cells=5 \ 21 | --child_keep_prob=0.90 \ 22 | --child_drop_path_keep_prob=0.60 \ 23 | --child_lr_cosine \ 24 | --child_lr_max=0.05 \ 25 | --child_lr_min=0.0005 \ 26 | --child_lr_T_0=10 \ 27 | --child_lr_T_mul=2 \ 28 | --controller_training \ 29 | --controller_search_whole_channels \ 30 | --controller_entropy_weight=0.0001 \ 31 | --controller_train_every=1 \ 32 | --controller_sync_replicas \ 33 | --controller_num_aggregate=10 \ 34 | 
--controller_train_steps=30 \ 35 | --controller_lr=0.0035 \ 36 | --controller_tanh_constant=1.10 \ 37 | --controller_op_tanh_reduce=2.5 \ 38 | "$@" 39 | 40 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/scalar/tests/test_basic_sympy.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from theano.scalar.basic_sympy import SymPyCCode 3 | from theano.scalar.basic import floats 4 | import theano 5 | 6 | try: 7 | import sympy 8 | xs = sympy.Symbol('x') 9 | ys = sympy.Symbol('y') 10 | except ImportError: 11 | from nose.plugins.skip import SkipTest 12 | raise SkipTest('optional package sympy disabled') 13 | 14 | xt, yt = floats('xy') 15 | 16 | 17 | def test_SymPyCCode(): 18 | op = SymPyCCode([xs, ys], xs + ys) 19 | e = op(xt, yt) 20 | g = theano.gof.FunctionGraph([xt, yt], [e]) 21 | fn = theano.gof.CLinker().accept(g).make_function() 22 | assert fn(1.0, 2.0) == 3.0 23 | 24 | 25 | def test_grad(): 26 | op = SymPyCCode([xs], xs**2) 27 | zt = op(xt) 28 | ztprime = theano.grad(zt, xt) 29 | assert ztprime.owner.op.expr == 2 * xs 30 | 31 | 32 | def test_multivar_grad(): 33 | op = SymPyCCode([xs, ys], xs ** 2 + ys ** 3) 34 | zt = op(xt, yt) 35 | dzdx, dzdy = theano.grad(zt, [xt, yt]) 36 | assert dzdx.owner.op.expr == 2 * xs 37 | assert dzdy.owner.op.expr == 3 * ys ** 2 38 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/generate_dtype_tensor_table.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | 3 | letters = [ 4 | ('b', 'int8'), 5 | ('w', 'int16'), 6 | ('i', 'int32'), 7 | ('l', 'int64'), 8 | ('d', 'float64'), 9 | ('f', 'float32'), 10 | ('c', 'complex64'), 11 | ('z', 'complex128') ] 12 | 13 | shapes = [ 14 | 
('scalar', ()), 15 | ('vector', (False,)), 16 | ('row', (True, False)), 17 | ('col', (False, True)), 18 | ('matrix', (False,False)), 19 | ('tensor3', (False,False,False)), 20 | ('tensor4', (False,False,False,False)),] 21 | 22 | hdr = '============ =========== ==== =========== =================================' 23 | print(hdr) 24 | print('Constructor dtype ndim shape broadcastable') 25 | print(hdr) 26 | for letter in letters: 27 | for shape in shapes: 28 | suff = ',)' if len(shape[1])==1 else ')' 29 | s = '(' + ','.join('1' if b else '?' for b in shape[1]) + suff 30 | print('%s%-10s %-10s %-4s %-10s %-20s' %( 31 | letter[0], shape[0], letter[1], len(shape[1]), s, shape[1] 32 | )) 33 | print(hdr) 34 | -------------------------------------------------------------------------------- /software/enas/scripts/ptb_search.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="$(pwd)" 4 | 5 | python src/ptb/main.py \ 6 | --search_for="enas" \ 7 | --noreset_output_dir \ 8 | --data_path="data/ptb/ptb.pkl" \ 9 | --output_dir="outputs" \ 10 | --batch_size=20 \ 11 | --child_bptt_steps=35 \ 12 | --num_epochs=100 \ 13 | --child_rhn_depth=12 \ 14 | --child_num_layers=1 \ 15 | --child_lstm_hidden_size=720 \ 16 | --child_lstm_e_keep=0.75 \ 17 | --child_lstm_x_keep=0.25 \ 18 | --child_lstm_h_keep=0.75 \ 19 | --child_lstm_o_keep=0.25 \ 20 | --nochild_lstm_e_skip \ 21 | --child_grad_bound=10.0 \ 22 | --child_lr=0.25 \ 23 | --child_lr_dec_start=12 \ 24 | --child_lr_dec_every=1 \ 25 | --child_lr_dec_rate=0.95 \ 26 | --child_lr_dec_min=0.0005 \ 27 | --child_optim_algo="sgd" \ 28 | --child_l2_reg=1e-7 \ 29 | --log_every=50 \ 30 | --controller_training \ 31 | --controller_train_every=1 \ 32 | --controller_lr=0.001 \ 33 | --controller_sync_replicas \ 34 | --controller_train_steps=100 \ 35 | --controller_num_aggregate=10 \ 36 | --controller_tanh_constant=2.5 \ 37 | --controller_temperature=5.0 \ 38 | 
--controller_entropy_weight=0.001 \ 39 | --eval_every_epochs=1 40 | 41 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/DESCRIPTION.txt: -------------------------------------------------------------------------------- 1 | Theano is a Python library that allows you to define, optimize, and efficiently evaluate mathematical expressions involving multi-dimensional arrays. It is built on top of NumPy_. Theano features: 2 | 3 | * **tight integration with NumPy:** a similar interface to NumPy's. numpy.ndarrays are also used internally in Theano-compiled functions. 4 | * **transparent use of a GPU:** perform data-intensive computations up to 140x faster than on a CPU (support for float32 only). 5 | * **efficient symbolic differentiation:** Theano can compute derivatives for functions of one or many inputs. 6 | * **speed and stability optimizations:** avoid nasty bugs when computing expressions such as log(1 + exp(x)) for large values of x. 7 | * **dynamic C code generation:** evaluate expressions faster. 8 | * **extensive unit-testing and self-verification:** includes tools for detecting and diagnosing bugs and/or potential problems. 9 | 10 | Theano has been powering large-scale computationally intensive scientific 11 | research since 2007, but it is also approachable enough to be used in the 12 | classroom (IFT6266 at the University of Montreal). 13 | 14 | .. 
_NumPy: http://numpy.scipy.org/ 15 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/compile/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from theano.compile.ops import ( 3 | DeepCopyOp, deep_copy_op, register_deep_copy_op_c_code, 4 | Shape, shape, register_shape_c_code, 5 | Shape_i, register_shape_i_c_code, 6 | ViewOp, view_op, register_view_op_c_code, FromFunctionOp, 7 | as_op, Rebroadcast, register_rebroadcast_c_code, 8 | SpecifyShape, specify_shape, register_specify_shape_c_code) 9 | 10 | from theano.compile.function_module import * 11 | 12 | from theano.compile.mode import * 13 | 14 | from theano.compile.io import * 15 | 16 | from theano.compile.debugmode import DebugMode 17 | 18 | from theano.compile.monitormode import MonitorMode 19 | 20 | from theano.compile.profiling import ProfileStats, ScanProfileStats 21 | 22 | from theano.compile.profilemode import ProfileMode 23 | 24 | from theano.compile.sharedvalue import (shared, shared_constructor, 25 | SharedVariable) 26 | from theano.compile.pfunc import pfunc, Param, rebuild_collect_shared 27 | 28 | from theano.compile.builders import * 29 | 30 | from theano.compile.function import function, function_dump 31 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tensor/tests/test_var.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import numpy as np 3 | from numpy.testing import assert_equal, assert_string_equal 4 | 5 | import theano 6 | import theano.tensor as tt 7 | import theano.tests.unittest_tools as utt 8 | 9 | 10 | def test_numpy_method(): 11 | # This type of code is used frequently by PyMC3 users 12 | x = tt.dmatrix('x') 13 | 
data = np.random.rand(5, 5) 14 | x.tag.test_value = data 15 | for fct in [np.arccos, np.arccosh, np.arcsin, np.arcsinh, 16 | np.arctan, np.arctanh, np.ceil, np.cos, np.cosh, np.deg2rad, 17 | np.exp, np.exp2, np.expm1, np.floor, np.log, 18 | np.log10, np.log1p, np.log2, np.rad2deg, 19 | np.sin, np.sinh, np.sqrt, np.tan, np.tanh, np.trunc]: 20 | y = fct(x) 21 | f = theano.function([x], y) 22 | utt.assert_allclose(np.nan_to_num(f(data)), 23 | np.nan_to_num(fct(data))) 24 | 25 | 26 | def test_copy(): 27 | x = tt.dmatrix('x') 28 | data = np.random.rand(5, 5) 29 | y = x.copy(name='y') 30 | f = theano.function([x], y) 31 | assert_equal(f(data), data) 32 | assert_string_equal(y.name, 'y') 33 | -------------------------------------------------------------------------------- /software/enas/scripts/cifar10_macro_search.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="$(pwd)" 4 | 5 | python src/cifar10/main.py \ 6 | --data_format="NCHW" \ 7 | --search_for="macro" \ 8 | --reset_output_dir \ 9 | --data_path="data/cifar10" \ 10 | --output_dir="outputs" \ 11 | --batch_size=128 \ 12 | --num_epochs=310 \ 13 | --log_every=50 \ 14 | --eval_every_epochs=1 \ 15 | --child_use_aux_heads \ 16 | --child_num_layers=12 \ 17 | --child_out_filters=36 \ 18 | --child_l2_reg=0.00025 \ 19 | --child_num_branches=6 \ 20 | --child_num_cell_layers=5 \ 21 | --child_num_cells=5 \ 22 | --child_keep_prob=0.90 \ 23 | --child_drop_path_keep_prob=0.60 \ 24 | --child_lr_cosine \ 25 | --child_lr_max=0.05 \ 26 | --child_lr_min=0.0005 \ 27 | --child_lr_T_0=10 \ 28 | --child_lr_T_mul=2 \ 29 | --controller_training \ 30 | --controller_search_whole_channels \ 31 | --controller_entropy_weight=0.0001 \ 32 | --controller_train_every=1 \ 33 | --controller_sync_replicas \ 34 | --controller_num_aggregate=20 \ 35 | --controller_train_steps=50 \ 36 | --controller_lr=0.001 \ 37 | --controller_tanh_constant=1.5 \ 38 | 
--controller_op_tanh_reduce=2.5 \ 39 | --controller_skip_target=0.4 \ 40 | --controller_skip_weight=0.8 \ 41 | "$@" 42 | 43 | -------------------------------------------------------------------------------- /software/enas/src/common_ops.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | 5 | def lstm(x, prev_c, prev_h, w): 6 | ifog = tf.matmul(tf.concat([x, prev_h], axis=1), w) 7 | i, f, o, g = tf.split(ifog, 4, axis=1) 8 | i = tf.sigmoid(i) 9 | f = tf.sigmoid(f) 10 | o = tf.sigmoid(o) 11 | g = tf.tanh(g) 12 | next_c = i * g + f * prev_c 13 | next_h = o * tf.tanh(next_c) 14 | return next_c, next_h 15 | 16 | 17 | def stack_lstm(x, prev_c, prev_h, w): 18 | next_c, next_h = [], [] 19 | for layer_id, (_c, _h, _w) in enumerate(zip(prev_c, prev_h, w)): 20 | inputs = x if layer_id == 0 else next_h[-1] 21 | curr_c, curr_h = lstm(inputs, _c, _h, _w) 22 | next_c.append(curr_c) 23 | next_h.append(curr_h) 24 | return next_c, next_h 25 | 26 | 27 | def create_weight(name, shape, initializer=None, trainable=True, seed=None): 28 | if initializer is None: 29 | initializer = tf.contrib.keras.initializers.he_normal(seed=seed) 30 | return tf.get_variable(name, shape, initializer=initializer, trainable=trainable) 31 | 32 | 33 | def create_bias(name, shape, initializer=None): 34 | if initializer is None: 35 | initializer = tf.constant_initializer(0.0, dtype=tf.float32) 36 | return tf.get_variable(name, shape, initializer=initializer) 37 | 38 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/softsign.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | 3 | import theano 4 | import theano.tensor 5 | 6 | 7 | class ScalarSoftsign(theano.scalar.UnaryScalarOp): 8 | # TODO : need description for class 9 | 
@staticmethod 10 | def static_impl(x): 11 | return x / (1.0 + abs(x)) 12 | 13 | def impl(self, x): 14 | return ScalarSoftsign.static_impl(x) 15 | 16 | def grad(self, inp, grads): 17 | x, = inp 18 | gz, = grads 19 | if 'float' in x.type.dtype: 20 | d = (1.0 + abs(x)) 21 | return [gz / (d * d)] 22 | else: 23 | return NotImplemented 24 | 25 | def c_code(self, node, name, inp, out, sub): 26 | x, = inp 27 | z, = out 28 | if node.inputs[0].type in [theano.scalar.float32, 29 | theano.scalar.float64]: 30 | return "%(z)s = %(x)s / (1.0+fabs(%(x)s));" % locals() 31 | raise NotImplementedError('only floating point x is implemented') 32 | 33 | scalar_softsign = ScalarSoftsign(theano.scalar.upgrade_to_float, 34 | name='scalar_softsign') 35 | softsign = theano.tensor.Elemwise(scalar_softsign, name='softsign') 36 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/extending/extending_faq.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _extend_faq: 3 | 4 | ========================================= 5 | Extending Theano: FAQ and Troubleshooting 6 | ========================================= 7 | 8 | I wrote a new Op/Type, and weird stuff is happening... 9 | ------------------------------------------------------ 10 | 11 | First, check the :ref:`op_contract` and the :ref:`type_contract` 12 | and make sure you're following the rules. 13 | Then try running your program in :ref:`using_debugmode`. DebugMode might catch 14 | something that you're not seeing. 15 | 16 | 17 | I wrote a new optimization, but it's not getting used... 18 | --------------------------------------------------------- 19 | 20 | Remember that you have to register optimizations with the :ref:`optdb` 21 | for them to get used by the normal modes like FAST_COMPILE, FAST_RUN, 22 | and DebugMode. 23 | 24 | 25 | I wrote a new optimization, and it changed my results even though I'm pretty sure it is correct. 
26 | ------------------------------------------------------------------------------------------------ 27 | 28 | First, check the :ref:`op_contract` and make sure you're following the rules. 29 | Then try running your program in :ref:`using_debugmode`. DebugMode might 30 | catch something that you're not seeing. 31 | 32 | 33 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sparse/sharedvar.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import copy 3 | import scipy.sparse 4 | from theano.compile import shared_constructor, SharedVariable 5 | from theano.sparse.basic import SparseType, _sparse_py_operators 6 | 7 | 8 | class SparseTensorSharedVariable(_sparse_py_operators, SharedVariable): 9 | dtype = property(lambda self: self.type.dtype) 10 | format = property(lambda self: self.type.format) 11 | 12 | 13 | @shared_constructor 14 | def sparse_constructor(value, name=None, strict=False, allow_downcast=None, 15 | borrow=False, format=None): 16 | """ 17 | SharedVariable Constructor for SparseType. 18 | 19 | writeme 20 | 21 | """ 22 | if not isinstance(value, scipy.sparse.spmatrix): 23 | raise TypeError("Expected a sparse matrix in the sparse shared variable constructor. 
Received: ", 24 | value.__class__) 25 | 26 | if format is None: 27 | format = value.format 28 | type = SparseType(format=format, dtype=value.dtype) 29 | if not borrow: 30 | value = copy.deepcopy(value) 31 | return SparseTensorSharedVariable(type=type, value=value, name=name, 32 | strict=strict, allow_downcast=allow_downcast) 33 | -------------------------------------------------------------------------------- /software/enas/scripts/cifar10_micro_final.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="$(pwd)" 4 | 5 | fixed_arc="0 2 0 0 0 4 0 1 0 4 1 1 1 0 0 1 0 2 1 1" 6 | fixed_arc="$fixed_arc 1 0 1 0 0 3 0 2 1 1 3 1 1 0 0 4 0 3 1 1" 7 | 8 | python src/cifar10/main.py \ 9 | --data_format="NCHW" \ 10 | --search_for="micro" \ 11 | --reset_output_dir \ 12 | --data_path="data/cifar10" \ 13 | --output_dir="outputs" \ 14 | --batch_size=144 \ 15 | --num_epochs=630 \ 16 | --log_every=50 \ 17 | --eval_every_epochs=1 \ 18 | --child_fixed_arc="${fixed_arc}" \ 19 | --child_use_aux_heads \ 20 | --child_num_layers=15 \ 21 | --child_out_filters=36 \ 22 | --child_num_branches=5 \ 23 | --child_num_cells=5 \ 24 | --child_keep_prob=0.80 \ 25 | --child_drop_path_keep_prob=0.60 \ 26 | --child_l2_reg=2e-4 \ 27 | --child_lr_cosine \ 28 | --child_lr_max=0.05 \ 29 | --child_lr_min=0.0001 \ 30 | --child_lr_T_0=10 \ 31 | --child_lr_T_mul=2 \ 32 | --nocontroller_training \ 33 | --controller_search_whole_channels \ 34 | --controller_entropy_weight=0.0001 \ 35 | --controller_train_every=1 \ 36 | --controller_sync_replicas \ 37 | --controller_num_aggregate=10 \ 38 | --controller_train_steps=50 \ 39 | --controller_lr=0.001 \ 40 | --controller_tanh_constant=1.50 \ 41 | --controller_op_tanh_reduce=2.5 \ 42 | "$@" 43 | 44 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/compile/tests/test_mode.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import theano 3 | from theano.compile.mode import Mode, AddFeatureOptimizer 4 | from theano.gof.toolbox import NoOutputFromInplace 5 | import theano.tensor as T 6 | 7 | 8 | def test_no_output_from_implace(): 9 | 10 | x = T.matrix() 11 | y = T.matrix() 12 | a = T.dot(x, y) 13 | b = T.tanh(a) 14 | 15 | # Ensure that the elemwise op that produces the output is inplace when 16 | # using a mode that does not include the optimization 17 | fct_no_opt = theano.function([x, y], b, mode="FAST_RUN") 18 | op = fct_no_opt.maker.fgraph.outputs[0].owner.op 19 | assert (hasattr(op, 'destroy_map') and 0 in op.destroy_map) 20 | 21 | # Ensure that the elemwise op that produces the output is not inplace when 22 | # using a mode that includes the optimization 23 | opt = AddFeatureOptimizer(NoOutputFromInplace()) 24 | mode_opt = Mode(linker="cvm", optimizer="fast_run").register((opt, 49.9)) 25 | 26 | fct_opt = theano.function([x, y], b, mode=mode_opt) 27 | op = fct_opt.maker.fgraph.outputs[0].owner.op 28 | assert (not hasattr(op, 'destroy_map') or 0 not in op.destroy_map) 29 | 30 | 31 | def test_including(): 32 | mode = theano.Mode(optimizer='merge') 33 | mode.including('fast_compile') 34 | -------------------------------------------------------------------------------- /software/enas/scripts/custom_cifar10_macro_final_6.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="$(pwd)" 4 | 5 | fixed_arc="${1}" 6 | output_appendix="${2}" 7 | filters="${3-96}" 8 | 9 | echo $fixed_arc 10 | 11 | python src/cifar10/main.py \ 12 | --data_format="NCHW" \ 13 | --search_for="macro" \ 14 | --reset_output_dir \ 15 | --data_path="data/cifar10" \ 16 | --output_dir="outputs_${output_appendix}" \ 17 | --batch_size=100 \ 18 | --num_epochs=310 \ 19 | --log_every=50 \ 20 | 
--eval_every_epochs=1 \ 21 | --child_fixed_arc="${fixed_arc}" \ 22 | --child_use_aux_heads \ 23 | --child_num_layers=6 \ 24 | --child_out_filters=${filters} \ 25 | --child_l2_reg=2e-4 \ 26 | --child_num_branches=6 \ 27 | --child_num_cell_layers=5 \ 28 | --child_keep_prob=0.50 \ 29 | --child_lr_cosine \ 30 | --child_lr_max=0.05 \ 31 | --child_lr_min=0.001 \ 32 | --child_lr_T_0=10 \ 33 | --child_lr_T_mul=2 \ 34 | --nocontroller_training \ 35 | --controller_search_whole_channels \ 36 | --controller_entropy_weight=0.0001 \ 37 | --controller_train_every=1 \ 38 | --controller_sync_replicas \ 39 | --controller_num_aggregate=20 \ 40 | --controller_train_steps=50 \ 41 | --controller_lr=0.001 \ 42 | --controller_tanh_constant=1.5 \ 43 | --controller_op_tanh_reduce=2.5 \ 44 | --controller_skip_target=0.4 \ 45 | --controller_skip_weight=0.8 \ 46 | "$@" 47 | 48 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/cifarSC2011/gpundarray.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _cifar2013_gpundarray: 3 | 4 | ********** 5 | GpuNdArray 6 | ********** 7 | 8 | Why a common GPU ndarray? 9 | 10 | - Currently there are at least 4 different GPU array data structures in use by Python packages 11 | 12 | - CudaNdarray (Theano), GPUArray (PyCUDA), CUDAMatrix (cudamat), GPUArray (PyOpenCL), ... 
13 | - There are even more if we include other languages
14 | 
15 | - All of them are a subset of the functionality of ``numpy.ndarray`` on the GPU
16 | - Lots of duplicated effort
17 | 
18 | - GPU code is harder/slower to do **correctly** and **fast** than on the CPU/Python
19 | 
20 | - Lack of a common array API makes it harder to port/reuse code
21 | - Also harder to find/distribute code
22 | - Divides development work
23 | 
24 | 
25 | Design Goals
26 | 
27 | - Make it VERY similar to ``numpy.ndarray``
28 | - Be compatible with both CUDA and OpenCL
29 | - Have the base object accessible from C to allow collaboration with more projects, across high-level languages
30 | 
31 | - We want people from C, C++, Ruby, R, ... all use the same base GPU N-dimensional array
32 | 
33 | 
34 | Final GpuNdArray Note
35 | 
36 | - Under development
37 | - Will be the next GPU array container for Theano (this summer!)
38 | - Probably also for PyCUDA, PyOpenCL
39 | - Mailing list: http://lists.tiker.net/listinfo/gpundarray
40 | 
41 | 
--------------------------------------------------------------------------------
/bayesian_optimization/Theano-master/theano/sandbox/cuda/tests/test_gradient.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function, division
2 | import unittest
3 | 
4 | import theano
5 | from theano import tensor
6 | from theano.sandbox import cuda
7 | 
8 | # Skip test if cuda_ndarray is not available.
9 | from nose.plugins.skip import SkipTest
10 | import theano.sandbox.cuda as cuda_ndarray
11 | if cuda_ndarray.cuda_available == False:
12 |     raise SkipTest('Optional package cuda disabled')
13 | 
14 | 
15 | class TestGradient(unittest.TestCase):
16 |     verbose = 0
17 | 
18 |     def test_gpu_out_multiple_clients(self):
19 |         # Test that when the output of gpu_from_host is used by more
20 |         # than one Op, the gradient still works.
21 | # A problem used to be that GpuFromHost.grad expected the output 22 | # gradient to be on GPU, but the summation of the different 23 | # incoming gradients was done on CPU. 24 | 25 | x = tensor.fmatrix('x') 26 | z = cuda.gpu_from_host(x) 27 | 28 | n1 = tensor.nnet.sigmoid(z) 29 | n2 = tensor.dot(z, z.T) 30 | 31 | s1 = n1.sum() 32 | s2 = n2.sum() 33 | 34 | c = s1 + s2 35 | 36 | dc_dx = theano.grad(c, x) 37 | if self.verbose: 38 | theano.printing.debugprint(c, print_type=True) 39 | theano.printing.debugprint(dc_dx, print_type=True) 40 | -------------------------------------------------------------------------------- /software/enas/scripts/ptb_final.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="$(pwd)" 4 | 5 | fixed_arc="0 0 0 1 1 2 1 2 0 2 0 5 1 1 0 6 1 8 1 8 1 8 1" 6 | 7 | python src/ptb/main.py \ 8 | --search_for="enas" \ 9 | --reset_output_dir \ 10 | --data_path="data/ptb/ptb.pkl" \ 11 | --output_dir="outputs" \ 12 | --batch_size=64 \ 13 | --child_bptt_steps=35 \ 14 | --num_epochs=2000 \ 15 | --child_fixed_arc="${fixed_arc}" \ 16 | --child_rhn_depth=12 \ 17 | --child_num_layers=1 \ 18 | --child_lstm_hidden_size=748 \ 19 | --child_lstm_e_keep=0.79 \ 20 | --child_lstm_x_keep=0.25 \ 21 | --child_lstm_h_keep=0.75 \ 22 | --child_lstm_o_keep=0.24 \ 23 | --nochild_lstm_e_skip \ 24 | --child_grad_bound=0.25 \ 25 | --child_lr=20.0 \ 26 | --child_rnn_slowness_reg=1e-3 \ 27 | --child_l2_reg=5e-7 \ 28 | --child_lr_dec_start=14 \ 29 | --child_lr_dec_every=1 \ 30 | --child_lr_dec_rate=0.9991 \ 31 | --child_lr_dec_min=0.001 \ 32 | --child_optim_algo="sgd" \ 33 | --log_every=50 \ 34 | --nocontroller_training \ 35 | --controller_selection_threshold=5 \ 36 | --controller_train_every=1 \ 37 | --controller_lr=0.001 \ 38 | --controller_sync_replicas \ 39 | --controller_train_steps=100 \ 40 | --controller_num_aggregate=10 \ 41 | --controller_tanh_constant=3.0 \ 42 | --controller_temperature=2.0 \ 43 
| --controller_entropy_weight=0.0001 \ 44 | --eval_every_epochs=1 45 | 46 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/misc/tests/test_cudamat_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import numpy 3 | import theano 4 | from theano.misc.cudamat_utils import cudamat_available 5 | 6 | if not cudamat_available: # noqa 7 | from nose.plugins.skip import SkipTest 8 | raise SkipTest("gnumpy not installed. Skip test of theano op with pycuda " 9 | "code.") 10 | 11 | from theano.misc.cudamat_utils import (cudandarray_to_cudamat, 12 | cudamat_to_cudandarray) 13 | 14 | 15 | def test(shape=(3, 4)): 16 | """ 17 | Make sure that the cudamat conversion is exact. 18 | """ 19 | gpu = theano.sandbox.cuda.basic_ops.gpu_from_host 20 | U = gpu(theano.tensor.fmatrix('U')) 21 | ii = theano.function([U], gpu(U + 1)) 22 | 23 | A_cpu = numpy.asarray(numpy.random.rand(*shape), dtype="float32") 24 | A_cnd = theano.sandbox.cuda.CudaNdarray(A_cpu) 25 | A_cmat = cudandarray_to_cudamat(A_cnd) 26 | 27 | B_cnd = cudamat_to_cudandarray(A_cmat) 28 | B_cnd = ii(A_cnd) 29 | 30 | u = A_cnd.copy() 31 | u += theano.sandbox.cuda.CudaNdarray(numpy.asarray([[1]], dtype='float32')) 32 | u = numpy.asarray(u) 33 | v = numpy.asarray(B_cnd) 34 | w = A_cmat.add(1).asarray() 35 | 36 | assert abs(u - v).max() == 0 37 | assert abs(u - w.T.reshape(u.shape)).max() == 0 38 | -------------------------------------------------------------------------------- /software/enas/scripts/custom_cifar10_macro_final_12.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="$(pwd)" 4 | 5 | fixed_arc="${1}" 6 | output_appendix="${2}" 7 | filters="${3-96}" 8 | 9 | echo $fixed_arc 10 | 11 | python src/cifar10/main.py \ 12 | --data_format="NCHW" \ 13 | 
--search_for="macro" \
14 |   --reset_output_dir \
15 |   --data_path="data/cifar10" \
16 |   --output_dir="outputs_${output_appendix}" \
17 |   --batch_size=100 \
18 |   --num_epochs=310 \
19 |   --log_every=50 \
20 |   --eval_every_epochs=1 \
21 |   --child_fixed_arc="${fixed_arc}" \
22 |   --child_use_aux_heads \
23 |   --child_num_layers=12 \
24 |   --structure_path="sample_structures12.txt" \
25 |   --child_out_filters=${filters} \
26 |   --child_l2_reg=2e-4 \
27 |   --child_num_branches=6 \
28 |   --child_num_cell_layers=5 \
29 |   --child_keep_prob=0.50 \
30 |   --child_lr_cosine \
31 |   --child_lr_max=0.05 \
32 |   --child_lr_min=0.001 \
33 |   --child_lr_T_0=10 \
34 |   --child_lr_T_mul=2 \
35 |   --nocontroller_training \
36 |   --controller_search_whole_channels \
37 |   --controller_entropy_weight=0.0001 \
38 |   --controller_train_every=1 \
39 |   --controller_sync_replicas \
40 |   --controller_num_aggregate=20 \
41 |   --controller_train_steps=50 \
42 |   --controller_lr=0.001 \
43 |   --controller_tanh_constant=1.5 \
44 |   --controller_op_tanh_reduce=2.5 \
45 |   --controller_skip_target=0.4 \
46 |   --controller_skip_weight=0.8 \
47 |   "$@"
48 | 
49 | 
--------------------------------------------------------------------------------
/bayesian_optimization/Theano-master/doc/crei2013/gpundarray.txt:
--------------------------------------------------------------------------------
1 | 
2 | .. _crei2013_gpundarray:
3 | 
4 | **********
5 | GpuNdArray
6 | **********
7 | 
8 | Why a common GPU ndarray?
9 | -------------------------
10 | 
11 | - Currently there are at least 4 different GPU array data structures in use by Python packages
12 | 
13 |   - CudaNdarray (Theano), GPUArray (PyCUDA), CUDAMatrix (cudamat), GPUArray (PyOpenCL), ...
14 | - There are even more if we include other languages
15 | 
16 | - All of them are a subset of the functionality of ``numpy.ndarray`` on the GPU
17 | - Lots of duplicated effort
18 | 
19 | - GPU code is harder/slower to do **correctly** and **fast** than on the CPU/Python
20 | 
21 | - Lack of a common array API makes it harder to port/reuse code
22 | - Also harder to find/distribute code
23 | - Divides development work
24 | 
25 | 
26 | Design Goals
27 | ------------
28 | 
29 | - Make it VERY similar to ``numpy.ndarray``
30 | - Be compatible with both CUDA and OpenCL
31 | - Have the base object accessible from C to allow collaboration with more projects, across high-level languages
32 | 
33 | - We want people from C, C++, Ruby, R, ... all use the same base GPU N-dimensional array
34 | 
35 | 
36 | Final Note
37 | ----------
38 | 
39 | - Under development
40 | - Will be the next GPU array container for Theano (*this summer!*)
41 | - Probably also for PyCUDA, PyOpenCL
42 | - Mailing list: http://lists.tiker.net/listinfo/gpundarray
43 | 
--------------------------------------------------------------------------------
/bayesian_optimization/Theano-master/doc/library/index.txt:
--------------------------------------------------------------------------------
1 | 
2 | .. _libdoc:
3 | 
4 | =====================
5 | Library Documentation
6 | =====================
7 | 
8 | This documentation covers Theano module-wise. This is suited to finding the
9 | Types and Ops that you can use to build and compile expression graphs.
10 | 
11 | .. toctree::
12 |     :maxdepth: 1
13 | 
14 |     tensor/index
15 |     gradient
16 |     config
17 |     printing
18 |     d3viz/index
19 |     compile/index
20 |     sparse/index
21 |     sparse/sandbox
22 |     scalar/index
23 |     gof/index
24 |     misc/pkl_utils
25 |     scan
26 |     sandbox/index
27 |     typed_list
28 | 
29 | There are also some top-level imports that you might find more convenient:
30 | 
31 | 
32 | ..
module:: theano 33 | :platform: Unix, Windows 34 | :synopsis: Theano top-level import 35 | .. moduleauthor:: LISA 36 | 37 | .. function:: function(...) 38 | 39 | Alias for :func:`function.function` 40 | 41 | 42 | .. function:: function_dump(...) 43 | 44 | Alias for :func:`theano.compile.function.function_dump` 45 | 46 | .. function:: shared(...) 47 | 48 | Alias for :func:`theano.compile.sharedvalue.shared` 49 | 50 | .. class:: In 51 | 52 | Alias for :class:`function.In` 53 | 54 | .. function:: dot(x, y) 55 | 56 | Works like :func:`tensor.dot` for both sparse and dense matrix products 57 | 58 | .. autofunction:: theano.clone 59 | 60 | .. autofunction:: theano.sparse_grad 61 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/d3viz/js/d3-context-menu.js: -------------------------------------------------------------------------------- 1 | d3.contextMenu = function (menu, openCallback) { 2 | 3 | // create the div element that will hold the context menu 4 | d3.selectAll('.d3-context-menu').data([1]) 5 | .enter() 6 | .append('div') 7 | .attr('class', 'd3-context-menu'); 8 | 9 | // close menu 10 | d3.select('body').on('click.d3-context-menu', function() { 11 | d3.select('.d3-context-menu').style('display', 'none'); 12 | }); 13 | 14 | // this gets executed when a contextmenu event occurs 15 | return function(data, index) { 16 | var elm = this; 17 | 18 | d3.selectAll('.d3-context-menu').html(''); 19 | var list = d3.selectAll('.d3-context-menu').append('ul'); 20 | list.selectAll('li').data(menu).enter() 21 | .append('li') 22 | .html(function(d) { 23 | return d.title; 24 | }) 25 | .on('click', function(d, i) { 26 | d.action(elm, data, index); 27 | d3.select('.d3-context-menu').style('display', 'none'); 28 | }); 29 | 30 | // the openCallback allows an action to fire before the menu is displayed 31 | // an example usage would be closing a tooltip 32 | if (openCallback) openCallback(data, index); 33 | 34 
| // display context menu 35 | d3.select('.d3-context-menu') 36 | .style('left', (d3.event.pageX - 2) + 'px') 37 | .style('top', (d3.event.pageY - 2) + 'px') 38 | .style('display', 'block'); 39 | 40 | d3.event.preventDefault(); 41 | }; 42 | }; 43 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/gof/null_type.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from theano.gof.type import Type 3 | 4 | 5 | class NullType(Type): 6 | """ 7 | A type that allows no values. 8 | 9 | Used to represent expressions 10 | that are undefined, either because they do not exist mathematically 11 | or because the code to generate the expression has not been 12 | implemented yet. 13 | 14 | Parameters 15 | ---------- 16 | why_null : str 17 | A string explaining why this variable can't take on any values. 18 | 19 | """ 20 | 21 | def __init__(self, why_null='(no explanation given)'): 22 | self.why_null = why_null 23 | 24 | def filter(self, data, strict=False, allow_downcast=None): 25 | raise ValueError("No values may be assigned to a NullType") 26 | 27 | def filter_variable(self, other, allow_convert=True): 28 | raise ValueError("No values may be assigned to a NullType") 29 | 30 | def may_share_memory(a, b): 31 | return False 32 | 33 | def values_eq(a, b, force_same_dtype=True): 34 | raise ValueError("NullType has no values to compare") 35 | 36 | def __eq__(self, other): 37 | return type(self) == type(other) 38 | 39 | def __hash__(self): 40 | return hash(type(self)) 41 | 42 | def __str__(self): 43 | return 'NullType' 44 | null_type = NullType() 45 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/gof/callcache.py: -------------------------------------------------------------------------------- 1 | from __future__ import 
absolute_import, print_function, division 2 | import logging 3 | import six.moves.cPickle as pickle 4 | 5 | _logger = logging.getLogger("theano.gof.callcache") 6 | 7 | 8 | class CallCache(object): 9 | def __init__(self, filename=None): 10 | self.filename = filename 11 | try: 12 | if filename is None: 13 | raise IOError('bad filename') # just goes to except 14 | with open(filename, 'r') as f: 15 | self.cache = pickle.load(f) 16 | except IOError: 17 | self.cache = {} 18 | 19 | def persist(self, filename=None): 20 | if filename is None: 21 | filename = self.filename 22 | with open(filename, 'w') as f: 23 | pickle.dump(self.cache, f) 24 | 25 | def call(self, fn, args=(), key=None): 26 | if key is None: 27 | key = (fn, tuple(args)) 28 | if key not in self.cache: 29 | _logger.debug('cache miss %i', len(self.cache)) 30 | self.cache[key] = fn(*args) 31 | else: 32 | _logger.debug('cache hit %i', len(self.cache)) 33 | return self.cache[key] 34 | 35 | def __del__(self): 36 | try: 37 | if self.filename: 38 | self.persist() 39 | except Exception as e: 40 | _logger.error('persist failed %s %s', self.filename, e) 41 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/d3viz/examples/d3viz/js/d3-context-menu.js: -------------------------------------------------------------------------------- 1 | d3.contextMenu = function (menu, openCallback) { 2 | 3 | // create the div element that will hold the context menu 4 | d3.selectAll('.d3-context-menu').data([1]) 5 | .enter() 6 | .append('div') 7 | .attr('class', 'd3-context-menu'); 8 | 9 | // close menu 10 | d3.select('body').on('click.d3-context-menu', function() { 11 | d3.select('.d3-context-menu').style('display', 'none'); 12 | }); 13 | 14 | // this gets executed when a contextmenu event occurs 15 | return function(data, index) { 16 | var elm = this; 17 | 18 | d3.selectAll('.d3-context-menu').html(''); 19 | var list = 
d3.selectAll('.d3-context-menu').append('ul'); 20 | list.selectAll('li').data(menu).enter() 21 | .append('li') 22 | .html(function(d) { 23 | return d.title; 24 | }) 25 | .on('click', function(d, i) { 26 | d.action(elm, data, index); 27 | d3.select('.d3-context-menu').style('display', 'none'); 28 | }); 29 | 30 | // the openCallback allows an action to fire before the menu is displayed 31 | // an example usage would be closing a tooltip 32 | if (openCallback) openCallback(data, index); 33 | 34 | // display context menu 35 | d3.select('.d3-context-menu') 36 | .style('left', (d3.event.pageX - 2) + 'px') 37 | .style('top', (d3.event.pageY - 2) + 'px') 38 | .style('display', 'block'); 39 | 40 | d3.event.preventDefault(); 41 | }; 42 | }; 43 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/extending/theano_vs_c.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _theano_vs_c: 3 | 4 | ============ 5 | Theano vs. C 6 | ============ 7 | 8 | We describe some of the patterns in Theano, and present their closest 9 | analogue in a statically typed language such as C: 10 | 11 | =============== =========================================================== 12 | Theano C 13 | =============== =========================================================== 14 | Apply function application / function call 15 | Variable local function data / variable 16 | Shared Variable global function data / variable 17 | Op operations carried out in computation / function definition 18 | Type data types 19 | =============== =========================================================== 20 | 21 | For example: 22 | 23 | .. 
code-block:: c
24 | 
25 |     int d = 0;
26 | 
27 |     int main(int a) {
28 |       int b = 3;
29 |       int c = f(b);
30 |       d = b + c;
31 |       return g(a, c);
32 |     }
33 | 
34 | 
35 | Based on this code snippet, we can relate ``f`` and ``g`` to Ops, ``a``,
36 | ``b`` and ``c`` to Variables, ``d`` to Shared Variable, ``g(a, c)``,
37 | ``f(b)`` and ``d = b + c`` (taken as meaning
38 | the action of computing ``f``, ``g`` or ``+`` on their respective inputs) to
39 | Applies. Lastly, ``int`` could be interpreted as the Theano Type of the
40 | Variables ``a``, ``b``, ``c`` and ``d``.
41 | 
42 | 
43 | 
44 | 
45 | 
46 | 
47 | 
--------------------------------------------------------------------------------
/bayesian_optimization/Theano-master/theano/tensor/tests/test_xlogx.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function, division
2 | from theano.tensor.xlogx import xlogx, xlogy0
3 | 
4 | import unittest
5 | 
6 | import theano
7 | from theano.tensor import as_tensor_variable
8 | from . 
import test_basic as TT 9 | 10 | import random 11 | import numpy.random 12 | from theano.tests import unittest_tools as utt 13 | 14 | 15 | class T_XlogX(unittest.TestCase): 16 | def setUp(self): 17 | utt.seed_rng() 18 | 19 | def test0(self): 20 | x = as_tensor_variable([1, 0]) 21 | y = xlogx(x) 22 | f = theano.function([], [y]) 23 | self.assertTrue(numpy.all(f() == numpy.asarray([0, 0.]))) 24 | def test1(self): 25 | # class Dummy(object): 26 | # def make_node(self, a): 27 | # return [xlogx(a)[:,2]] 28 | utt.verify_grad(xlogx, [numpy.random.rand(3, 4)]) 29 | 30 | 31 | class T_XlogY0(unittest.TestCase): 32 | def setUp(self): 33 | utt.seed_rng() 34 | def test2(self): 35 | utt.verify_grad(xlogy0, [numpy.random.rand(3, 4), numpy.random.rand(3, 4)]) 36 | 37 | def test3(self): 38 | x = as_tensor_variable([1, 0]) 39 | y = as_tensor_variable([1, 0]) 40 | z = xlogy0(x, y) 41 | f = theano.function([], z) 42 | self.assertTrue(numpy.all(f() == numpy.asarray([0, 0.]))) 43 | 44 | 45 | if __name__ == '__main__': 46 | unittest.main() 47 | -------------------------------------------------------------------------------- /software/enas/scripts/cifar10_6_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="$(pwd)" 4 | 5 | fixed_arc="4" 6 | fixed_arc="$fixed_arc 0 0" 7 | fixed_arc="$fixed_arc 4 1 0" 8 | fixed_arc="$fixed_arc 1 1 1 0" 9 | fixed_arc="$fixed_arc 5 1 0 0 0" 10 | fixed_arc="$fixed_arc 5 1 1 0 1 0" 11 | 12 | python src/cifar10/evaluation.py \ 13 | --data_format="NCHW" \ 14 | --search_for="macro" \ 15 | --data_path="data/cifar10" \ 16 | --output_dir="outputs_6" \ 17 | --child_arc2="${fixed_arc}" \ 18 | --structure_path="structures6.txt" \ 19 | --batch_size=128 \ 20 | --num_epochs=10 \ 21 | --log_every=50 \ 22 | --eval_every_epochs=1 \ 23 | --child_use_aux_heads \ 24 | --child_num_layers=6 \ 25 | --child_out_filters=36 \ 26 | --child_l2_reg=0.00025 \ 27 | --child_num_branches=6 \ 28 | 
--child_num_cells=5 \ 29 | --child_keep_prob=0.90 \ 30 | --child_drop_path_keep_prob=0.60 \ 31 | --child_lr_cosine \ 32 | --child_lr_max=0.05 \ 33 | --child_lr_min=0.0005 \ 34 | --child_lr_T_0=10 \ 35 | --child_lr_T_mul=2 \ 36 | --controller_training \ 37 | --controller_search_whole_channels \ 38 | --controller_entropy_weight=0.0001 \ 39 | --controller_train_every=1 \ 40 | --controller_sync_replicas \ 41 | --controller_num_aggregate=20 \ 42 | --controller_train_steps=50 \ 43 | --controller_lr=0.001 \ 44 | --controller_tanh_constant=1.5 \ 45 | --controller_op_tanh_reduce=2.5 \ 46 | --controller_skip_target=0.4 \ 47 | --controller_skip_weight=0.8 \ 48 | "$@" 49 | 50 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/proposals/opt_patterns2.txt: -------------------------------------------------------------------------------- 1 | 2 | ====================== 3 | Optimization Patterns 4 | ====================== 5 | 6 | .. note: 7 | Proposed 2010 01 20 8 | 9 | 10 | Motivation 11 | ========== 12 | 13 | Theano optimizations are organized at high level, 14 | but canonicalization and specialization (C&S) are a mess. It is difficult to know how a graph will 15 | be optimized, or to know in which order optimizations will be performed. 16 | C&S is also slow because of the guess-and-check nature of node optimization within equilibrium 17 | optimizers (VERIFY THIS BY PROFILING). 18 | C&S functions are also very difficult and tedious to write because of 19 | symmetries in the graph, and because of the lack of standard Op names 20 | (e.g. ``T.add``, ``T.and_``, and ``T._shape``). Gemm and the advanced_indexing -> xent 21 | optimization are particularly tricky examples. 22 | 23 | Defining a sort of regexp-like approach for describing graph substitutions would ideally be 24 | less error-prone, less tedious, more efficient to evaluate, easier to document, and all-round 25 | better. 
26 | 
27 | 
28 | Proposal
29 | ========
30 | 
31 | In a nutshell: revisit the PatternSub and make it more powerful.
32 | 
33 | Olivier B. (original author of PatternSub) mentioned that one of the problems was the annoyance
34 | of working through DimShuffle
35 | 
36 | Olivier B. also suggests writing scalar-related patterns in terms of scalars, and then inferring Tensor-related patterns.
37 | 
38 | 
--------------------------------------------------------------------------------
/software/enas/src/cifar10/cifar10_6_eval.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | export PYTHONPATH="$(pwd)"
4 | 
5 | fixed_arc="4"
6 | fixed_arc="$fixed_arc 0 0"
7 | fixed_arc="$fixed_arc 4 1 0"
8 | fixed_arc="$fixed_arc 1 1 1 0"
9 | fixed_arc="$fixed_arc 5 1 0 0 0"
10 | fixed_arc="$fixed_arc 5 1 1 0 1 0"
11 | 
12 | python src/cifar10/eval_structure.py \
13 |   --data_format="NCHW" \
14 |   --search_for="macro" \
15 |   --data_path="data/cifar10" \
16 |   --output_dir="outputs_6" \
17 |   --child_arc2="${fixed_arc}" \
18 |   --structure_path="structures6.txt" \
19 |   --batch_size=128 \
20 |   --num_epochs=10 \
21 |   --log_every=50 \
22 |   --eval_every_epochs=1 \
23 |   --child_use_aux_heads \
24 |   --child_num_layers=6 \
25 |   --child_out_filters=36 \
26 |   --child_l2_reg=0.00025 \
27 |   --child_num_branches=6 \
28 |   --child_num_cells=5 \
29 |   --child_keep_prob=0.90 \
30 |   --child_drop_path_keep_prob=0.60 \
31 |   --child_lr_cosine \
32 |   --child_lr_max=0.05 \
33 |   --child_lr_min=0.0005 \
34 |   --child_lr_T_0=10 \
35 |   --child_lr_T_mul=2 \
36 |   --controller_training \
37 |   --controller_search_whole_channels \
38 |   --controller_entropy_weight=0.0001 \
39 |   --controller_train_every=1 \
40 |   --controller_sync_replicas \
41 |   --controller_num_aggregate=20 \
42 |   --controller_train_steps=50 \
43 |   --controller_lr=0.001 \
44 |   --controller_tanh_constant=1.5 \
45 |   --controller_op_tanh_reduce=2.5 \
46 |   --controller_skip_target=0.4 \
47 | 
--controller_skip_weight=0.8 \ 48 | "$@" 49 | 50 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/theano_installer_for_anaconda.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | rem if ANACONDA_DIR is not defined 4 | if [%ANACONDA_DIR%] == [^%ANACONDA_DIR^%] ( 5 | if exist "c:\Anaconda" set ANACONDA_DIR=C:\Anaconda 6 | ) 7 | 8 | if [%ANACONDA_DIR%] == [^%ANACONDA_DIR^%] ( 9 | echo "Anaconda not found. Please install AnacondaCE or set the ANACONDA_DIR environment variable to the location of your Anaconda installation." 10 | goto end 11 | ) 12 | 13 | if not exist %ANACONDA_DIR% ( 14 | echo Anaconda install directory %ANACONDA_DIR% does not exist 15 | goto end) 16 | 17 | echo Anaconda found in %ANACONDA_DIR% 18 | echo copying dlls from %ANACONDA_DIR%\MinGW\x86_64-w64-mingw32\lib to %ANACONDA_DIR%\ 19 | copy %ANACONDA_DIR%\MinGW\x86_64-w64-mingw32\lib\*.dll %ANACONDA_DIR% 20 | echo done 21 | 22 | echo Trying to install theano 23 | pip install Theano 24 | echo installed 25 | 26 | rem Put a default .theanorc.txt 27 | set THEANORC=%USERPROFILE%\.theanorc.txt 28 | set THEANORC_=%USERPROFILE%\.theanorc_install.txt 29 | echo [global]> %THEANORC_% 30 | echo openmp=False>> %THEANORC_% 31 | echo.>> %THEANORC_% 32 | echo [blas]>> %THEANORC_% 33 | echo ldflags=>> %THEANORC_% 34 | 35 | if exist %THEANORC% ( 36 | echo A .theanorc.txt config file already exists, so we will not change it. 37 | echo The default version is in %THEANORC_%, we suggest you check it out. 
38 | ) else ( 39 | rename %THEANORC_% .theanorc.txt 40 | ) 41 | 42 | :end 43 | echo end 44 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/crei2013/logreg.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import numpy 3 | import theano 4 | import theano.tensor as tt 5 | rng = numpy.random 6 | 7 | N = 400 8 | feats = 784 9 | D = (rng.randn(N, feats), rng.randint(size=N, low=0, high=2)) 10 | training_steps = 10000 11 | 12 | # Declare Theano symbolic variables 13 | x = tt.matrix("x") 14 | y = tt.vector("y") 15 | w = theano.shared(rng.randn(feats), name="w") 16 | b = theano.shared(0., name="b") 17 | print("Initial model:") 18 | print(w.get_value(), b.get_value()) 19 | 20 | # Construct Theano expression graph 21 | p_1 = 1 / (1 + tt.exp(-tt.dot(x, w) - b)) # Probability that target = 1 22 | prediction = p_1 > 0.5 # The prediction thresholded 23 | xent = -y * tt.log(p_1) - (1 - y) * tt.log(1 - p_1) # Cross-entropy loss 24 | cost = xent.mean() + 0.01 * (w ** 2).sum() # The cost to minimize 25 | gw, gb = tt.grad(cost, [w, b]) 26 | 27 | # Compile 28 | train = theano.function( 29 | inputs=[x, y], 30 | outputs=[prediction, xent], 31 | updates=[(w, w - 0.1 * gw), 32 | (b, b - 0.1 * gb)], 33 | name='train') 34 | 35 | predict = theano.function(inputs=[x], outputs=prediction, 36 | name='predict') 37 | 38 | # Train 39 | for i in range(training_steps): 40 | pred, err = train(D[0], D[1]) 41 | 42 | print("Final model:") 43 | print(w.get_value(), b.get_value()) 44 | print("target values for D:", D[1]) 45 | print("prediction on D:", predict(D[0])) 46 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/omlw2014/logreg.py: -------------------------------------------------------------------------------- 1 | from __future__ import 
absolute_import, print_function, division 2 | import numpy 3 | import theano 4 | import theano.tensor as tt 5 | rng = numpy.random 6 | 7 | N = 400 8 | feats = 784 9 | D = (rng.randn(N, feats), rng.randint(size=N, low=0, high=2)) 10 | training_steps = 10000 11 | 12 | # Declare Theano symbolic variables 13 | x = tt.matrix("x") 14 | y = tt.vector("y") 15 | w = theano.shared(rng.randn(feats), name="w") 16 | b = theano.shared(0., name="b") 17 | print("Initial model:") 18 | print(w.get_value(), b.get_value()) 19 | 20 | # Construct Theano expression graph 21 | p_1 = 1 / (1 + tt.exp(-tt.dot(x, w) - b)) # Probability that target = 1 22 | prediction = p_1 > 0.5 # The prediction thresholded 23 | xent = -y * tt.log(p_1) - (1 - y) * tt.log(1 - p_1) # Cross-entropy loss 24 | cost = xent.mean() + 0.01 * (w ** 2).sum() # The cost to minimize 25 | gw, gb = tt.grad(cost, [w, b]) 26 | 27 | # Compile 28 | train = theano.function( 29 | inputs=[x, y], 30 | outputs=[prediction, xent], 31 | updates=[(w, w - 0.1 * gw), 32 | (b, b - 0.1 * gb)], 33 | name='train') 34 | 35 | predict = theano.function(inputs=[x], outputs=prediction, 36 | name='predict') 37 | 38 | # Train 39 | for i in range(training_steps): 40 | pred, err = train(D[0], D[1]) 41 | 42 | print("Final model:") 43 | print(w.get_value(), b.get_value()) 44 | print("target values for D:", D[1]) 45 | print("prediction on D:", predict(D[0])) 46 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tensor/nnet/tests/test_opt.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import theano 3 | from theano import tensor 4 | from theano.tensor.nnet.blocksparse import sparse_block_dot 5 | 6 | 7 | def test_blocksparse_inplace_gemv_opt(): 8 | b = tensor.fmatrix() 9 | W = tensor.ftensor4() 10 | h = tensor.ftensor3() 11 | iIdx = tensor.lmatrix() 12 | oIdx = 
tensor.lmatrix() 13 | 14 | o = sparse_block_dot(W, h, iIdx, b, oIdx) 15 | 16 | f = theano.function([W, h, iIdx, b, oIdx], o) 17 | assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace') 18 | 19 | if theano.config.mode == "FAST_COMPILE": 20 | assert not f.maker.fgraph.toposort()[-1].op.inplace 21 | else: 22 | assert f.maker.fgraph.toposort()[-1].op.inplace 23 | 24 | 25 | def test_blocksparse_inplace_outer_opt(): 26 | b = tensor.fmatrix() 27 | W = tensor.ftensor4() 28 | h = tensor.ftensor3() 29 | iIdx = tensor.lmatrix() 30 | oIdx = tensor.lmatrix() 31 | 32 | o = sparse_block_dot(W, h, iIdx, b, oIdx) 33 | 34 | theano.printing.debugprint(tensor.grad(o.sum(), wrt=W)) 35 | 36 | f = theano.function([W, h, iIdx, b, oIdx], 37 | [o, tensor.grad(o.sum(), wrt=W)]) 38 | assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace') 39 | 40 | if theano.config.mode == "FAST_COMPILE": 41 | assert not f.maker.fgraph.toposort()[-1].op.inplace 42 | else: 43 | assert f.maker.fgraph.toposort()[-1].op.inplace 44 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/cifarSC2011/boot_camp_overview.txt: -------------------------------------------------------------------------------- 1 | 2 | .. _cifar_summer_school2011_overview: 3 | 4 | 5 | ======== 6 | Schedule 7 | ======== 8 | 9 | 10 | Theano lab sessions will be in 4 x 90 minute blocks, 11 | on the afternoons of Aug 2, 3, 5, and 6 (but not Aug 4th). 12 | 13 | Day 1 14 | ----- 15 | 16 | * Show of hands - what is your background? 17 | 18 | * Python & NumPy in a nutshell 19 | 20 | * Theano basics 21 | 22 | * Quick tour through Deep Learning Tutorials (think about projects) 23 | 24 | .. 
:
25 |     day 1:
26 |     I think that I could cover those 2 pages:
27 |     * http://deeplearning.net/software/theano/hpcs2011_tutorial/introduction.html
28 |     * http://deeplearning.net/software/theano/hpcs2011_tutorial/theano.html
29 |     That include:
30 |         simple example
31 |         linear regression example with shared var
32 |         theano flags
33 |         grad detail
34 |         Symbolic variables
35 |         gpu
36 |         benchmark
37 | 
38 | Day 2
39 | -----
40 | 
41 | * Loop/Condition in Theano (10-20m)
42 | 
43 | * Propose/discuss projects
44 | 
45 | * Form groups and start projects!
46 | 
47 | Day 3
48 | -----
49 | 
50 | * Advanced Theano (30 minutes)
51 | 
52 | * Debugging, profiling, compilation pipeline
53 | 
54 | * Projects / General hacking / code-sprinting.
55 | 
56 | Day 4
57 | -----
58 | 
59 | * *You choose* (we can split the group)
60 | 
61 | * Extending Theano
62 | 
63 |   * How to write an Op
64 | 
65 |   * How to use pycuda code in Theano
66 | 
67 | * Projects / General hacking / code-sprinting.
68 | 
69 | 
--------------------------------------------------------------------------------
/bayesian_optimization/Theano-master/theano/gof/tests/test_graph_opt_caching.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import, print_function, division
2 | import os
3 | import numpy
4 | import theano
5 | import theano.tensor as T
6 | 
7 | floatX = 'float32'
8 | 
9 | 
10 | def test_graph_opt_caching():
11 |     opt_db_file = theano.config.compiledir + '/optimized_graphs.pkl'
12 |     os.system('rm %s' % opt_db_file)
13 | 
14 |     mode = theano.config.mode
15 |     if mode in ["DEBUG_MODE", "DebugMode"]:
16 |         mode = "FAST_RUN"
17 |     default = theano.config.cache_optimizations
18 |     try:
19 |         theano.config.cache_optimizations = True
20 |         a = T.fmatrix('a')
21 |         b = T.fmatrix('b')
22 |         c = theano.shared(numpy.ones((10, 10), dtype=floatX))
23 |         d = theano.shared(numpy.ones((10, 10), dtype=floatX))
24 |         e = T.sum(T.sum(T.sum(a ** 2 + b) + c) + d)
25 |         f1 = theano.function([a, 
b], e, mode=mode) 26 | 27 | m = T.fmatrix('x1') 28 | n = T.fmatrix('x2') 29 | p = theano.shared(numpy.ones((10, 10), dtype=floatX)) 30 | q = theano.shared(numpy.ones((10, 10), dtype=floatX)) 31 | j = T.sum(T.sum(T.sum(m ** 2 + n) + p) + q) 32 | f2 = theano.function([m, n], j, mode=mode) 33 | 34 | in1 = numpy.ones((10, 10), dtype=floatX) 35 | in2 = numpy.ones((10, 10), dtype=floatX) 36 | assert f1(in1, in2) == f2(in1, in2) 37 | finally: 38 | theano.config.cache_optimizations = default 39 | 40 | if __name__ == '__main__': 41 | test_graph_opt_caching() 42 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/benchmark/autoencoder/aa_numpy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import absolute_import, print_function, division 3 | import numpy as N 4 | import sys 5 | import time 6 | from six.moves import xrange 7 | 8 | # c: aa.cc 9 | 10 | neg, nout, nhid, niter = [int(a) for a in sys.argv[1:]] 11 | lr = 0.01 12 | 13 | rng = N.random.RandomState(342) 14 | 15 | w = rng.rand(nout, nhid) 16 | a = rng.randn(nhid) * 0.0 17 | b = rng.randn(nout) * 0.0 18 | x = (rng.rand(neg, nout)-0.5) * 1.5 19 | 20 | dot_time = 0.0 21 | 22 | t = time.time() 23 | for i in xrange(niter): 24 | tt = time.time() 25 | d = N.dot(x, w) 26 | dot_time += time.time() - tt 27 | 28 | hid = N.tanh(d + a) 29 | 30 | tt = time.time() 31 | d = N.dot(hid, w.T) 32 | dot_time += time.time() - tt 33 | out = N.tanh(d + b) 34 | 35 | g_out = out - x 36 | err = 0.5 * N.sum(g_out**2) 37 | 38 | g_hidwt = g_out * (1.0 - out**2) 39 | 40 | b -= lr * N.sum(g_hidwt, axis=0) 41 | 42 | tt = time.time() 43 | g_hid = N.dot(g_hidwt, w) 44 | dot_time += time.time() - tt 45 | 46 | g_hidin = g_hid * (1.0 - hid**2) 47 | 48 | tt = time.time() 49 | d = N.dot(g_hidwt.T, hid) 50 | dd = N.dot(x.T, g_hidin) 51 | dot_time += time.time() - tt 52 | 53 | gw = (d + dd) 54 | w -= lr * gw 55 
| 56 | a -= lr * N.sum(g_hidin, axis=0) 57 | 58 | total_time = time.time() - t 59 | print('time: ',total_time, 'err: ', err) 60 | print(' of which', dot_time, 'was spent on dot. Fraction:', dot_time / total_time) 61 | 62 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tensor/tests/_test_mpi_roundtrip.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | # Run using 3 | # mpiexec -np 2 python _test_mpi_roundtrip.py 4 | 5 | from mpi4py import MPI 6 | import theano 7 | from theano.tensor.io import send, recv, mpi_cmps 8 | from theano.gof.sched import sort_schedule_fn 9 | import numpy as np 10 | from sys import stdout, stderr, exit 11 | 12 | comm = MPI.COMM_WORLD 13 | 14 | rank = comm.Get_rank() 15 | size = comm.Get_size() 16 | 17 | if size != 2: 18 | stderr.write("mpiexec failed to create a world with two nodes.\n" 19 | "Closing with success message.") 20 | stdout.write("True") 21 | exit(0) 22 | 23 | shape = (2, 2) 24 | dtype = 'float32' 25 | 26 | scheduler = sort_schedule_fn(*mpi_cmps) 27 | mode = theano.Mode(optimizer=None, 28 | linker=theano.OpWiseCLinker(schedule=scheduler)) 29 | 30 | if rank == 0: 31 | x = theano.tensor.matrix('x', dtype=dtype) 32 | y = x + 1 33 | send_request = send(y, 1, 11) 34 | 35 | z = recv(shape, dtype, 1, 12) 36 | 37 | f = theano.function([x], [send_request, z], mode=mode) 38 | 39 | xx = np.random.rand(*shape).astype(dtype) 40 | expected = (xx + 1) * 2 41 | 42 | _, zz = f(xx) 43 | 44 | same = np.linalg.norm(zz - expected) < .001 45 | stdout.write(str(same)) 46 | 47 | if rank == 1: 48 | 49 | y = recv(shape, dtype, 0, 11) 50 | z = y * 2 51 | send_request = send(z, 0, 12) 52 | 53 | f = theano.function([], send_request, mode=mode) 54 | 55 | f() 56 | -------------------------------------------------------------------------------- 
/bayesian_optimization/Theano-master/doc/sandbox/max_gotcha.txt: -------------------------------------------------------------------------------- 1 | .. _sandbox_maxgotcha: 2 | 3 | ========== 4 | Max Gotcha 5 | ========== 6 | 7 | Guillaume and I just found a bug in some experiment code that was 8 | basically caused by confusing semantics of max(). The same sort of 9 | thing applies to min. This is an FYI email to help others on the list 10 | avoid this mistake, which is (I think) easy to make. 11 | 12 | Python's max() function takes multiple arguments and returns the 13 | largest one of them. (I won't go into the details of how it deals with 14 | corner cases.) 15 | 16 | IN CONTRAST 17 | 18 | numpy's max() function takes multiple arguments and returns the 19 | largest element[s] from the *first* argument. The second argument is 20 | used to identify the axis along which to evaluate the [python-style] 21 | max. The third argument is an array into which the result can be 22 | written. 23 | 24 | So for example: 25 | 26 | .. doctest:: 27 | :options: +SKIP 28 | 29 | >>> import numpy 30 | >>> max(3, 4) 31 | 4 32 | >>> numpy.max(3, 4) # This is an error 33 | 3 34 | >>> a, b, c = [numpy.asarray(i) for i in [0, 1, 2]] 35 | >>> numpy.max(a, b, c) # This is an error 36 | 0 37 | >>> c 38 | array(0) 39 | 40 | Be careful! 41 | 42 | Theano defines a max function (called theano.tensor.max) that is 43 | similar numpy's max. 44 | 45 | Theano also defines a function called theano.tensor.largest that is 46 | closer to python's, but not identical since it works elemwise for 47 | tensors. 
There is a corresponding 'smallest' function that is like 48 | min() 49 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/benchmark/convolution/conv2d.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | import sys, timeit, time 3 | import numpy 4 | import theano, theano.tensor.signal.conv 5 | 6 | try: 7 | img_shape = int(sys.argv[1]), int(sys.argv[2]) 8 | ker_shape = int(sys.argv[3]), int(sys.argv[4]) 9 | dtype = sys.argv[5] 10 | except: 11 | print("Usage: %s [nb_call]" % sys.argv[0], file=sys.stderr) 12 | sys.exit(-1) 13 | 14 | nb_call = 1 15 | if len(sys.argv)>6: 16 | nb_call=int(sys.argv[6]) 17 | 18 | setup=""" 19 | import sys, timeit, time 20 | import numpy 21 | import theano, theano.tensor.signal.conv 22 | 23 | img_shape = int(sys.argv[1]), int(sys.argv[2]) 24 | ker_shape = int(sys.argv[3]), int(sys.argv[4]) 25 | dtype = sys.argv[5] 26 | 27 | img = theano.shared(numpy.ones(img_shape, dtype=dtype)) 28 | ker = theano.shared(numpy.ones(ker_shape, dtype=dtype)) 29 | out = theano.shared(numpy.ones((2,2,2), dtype=dtype)) 30 | """ 31 | 32 | T = timeit.Timer("f()", 33 | setup+"f = theano.function([], theano.tensor.signal.conv.conv2d(img, ker))") 34 | time_without_shape = T.repeat(repeat=3, number=nb_call) 35 | print(min(time_without_shape), 'theano without shape') 36 | 37 | T = timeit.Timer("f()", setup+"""f = theano.function([], [], 38 | updates={out:theano.tensor.signal.conv.conv2d(img, 39 | ker,image_shape=img_shape,filter_shape=ker_shape)})""") 40 | time_with_shape = T.repeat(repeat=3, number=nb_call) 41 | 42 | print(min(time_with_shape), 'theano with shape') 43 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/gpuarray/dnn_conv_base.c: 
-------------------------------------------------------------------------------- 1 | #section support_code_struct 2 | cudnnTensorDescriptor_t APPLY_SPECIFIC(input); 3 | cudnnTensorDescriptor_t APPLY_SPECIFIC(output); 4 | cudnnFilterDescriptor_t APPLY_SPECIFIC(kerns); 5 | 6 | #section init_code_struct 7 | 8 | cudnnStatus_t APPLY_SPECIFIC(err); 9 | APPLY_SPECIFIC(input) = NULL; 10 | APPLY_SPECIFIC(output) = NULL; 11 | APPLY_SPECIFIC(kerns) = NULL; 12 | if ((APPLY_SPECIFIC(err) = cudnnCreateTensorDescriptor(&APPLY_SPECIFIC(input))) != CUDNN_STATUS_SUCCESS) { 13 | PyErr_Format(PyExc_MemoryError, "could not allocate tensor descriptor " 14 | "(inp): %s", cudnnGetErrorString(APPLY_SPECIFIC(err))); 15 | FAIL; 16 | } 17 | if ((APPLY_SPECIFIC(err) = cudnnCreateTensorDescriptor(&APPLY_SPECIFIC(output))) != CUDNN_STATUS_SUCCESS) { 18 | PyErr_Format(PyExc_MemoryError, "could not allocate tensor descriptor " 19 | "(out): %s", cudnnGetErrorString(APPLY_SPECIFIC(err))); 20 | FAIL; 21 | } 22 | if ((APPLY_SPECIFIC(err) = cudnnCreateFilterDescriptor(&APPLY_SPECIFIC(kerns))) != CUDNN_STATUS_SUCCESS) { 23 | PyErr_Format(PyExc_MemoryError, "could not allocate filter descriptor: %s", 24 | cudnnGetErrorString(APPLY_SPECIFIC(err))); 25 | FAIL; 26 | } 27 | 28 | #section cleanup_code_struct 29 | 30 | if (APPLY_SPECIFIC(input) != NULL) 31 | cudnnDestroyTensorDescriptor(APPLY_SPECIFIC(input)); 32 | if (APPLY_SPECIFIC(output) != NULL) 33 | cudnnDestroyTensorDescriptor(APPLY_SPECIFIC(output)); 34 | if (APPLY_SPECIFIC(kerns) != NULL) 35 | cudnnDestroyFilterDescriptor(APPLY_SPECIFIC(kerns)); 36 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/typed_list.txt: -------------------------------------------------------------------------------- 1 | .. _libdoc_typed_list: 2 | 3 | =============================== 4 | :mod:`typed_list` -- Typed List 5 | =============================== 6 | 7 | .. 
note:: 8 | 9 | This has been added in release 0.7. 10 | 11 | .. note:: 12 | 13 | This works, but is not well integrated with the rest of Theano. If 14 | speed is important, it is probably better to pad to a dense 15 | tensor. 16 | 17 | This is a type that represents a list in Theano. All elements must have 18 | the same Theano type. Here is an example: 19 | 20 | >>> import theano.typed_list 21 | >>> tl = theano.typed_list.TypedListType(theano.tensor.fvector)() 22 | >>> v = theano.tensor.fvector() 23 | >>> o = theano.typed_list.append(tl, v) 24 | >>> f = theano.function([tl, v], o) 25 | >>> f([[1, 2, 3], [4, 5]], [2]) 26 | [array([ 1., 2., 3.], dtype=float32), array([ 4., 5.], dtype=float32), array([ 2.], dtype=float32)] 27 | 28 | A second example with Scan. Scan doesn't yet have direct support of 29 | TypedList, so you can only use it as non_sequences (not in sequences or 30 | as outputs): 31 | 32 | >>> import theano.typed_list 33 | >>> a = theano.typed_list.TypedListType(theano.tensor.fvector)() 34 | >>> l = theano.typed_list.length(a) 35 | >>> s, _ = theano.scan(fn=lambda i, tl: tl[i].sum(), 36 | ... non_sequences=[a], 37 | ... sequences=[theano.tensor.arange(l, dtype='int64')]) 38 | >>> f = theano.function([a], s) 39 | >>> f([[1, 2, 3], [4, 5]]) 40 | array([ 6., 9.], dtype=float32) 41 | 42 | .. 
automodule:: theano.typed_list.basic 43 | :members: 44 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/sandbox/gpuarray/conv_desc.c: -------------------------------------------------------------------------------- 1 | #section support_code_apply 2 | 3 | int APPLY_SPECIFIC(conv_desc)(PyArrayObject *filt_shp, 4 | cudnnConvolutionDescriptor_t *desc) { 5 | cudnnStatus_t err; 6 | int pad[3] = {PAD_0, PAD_1, PAD_2}; 7 | int strides[3] = {SUB_0, SUB_1, SUB_2}; 8 | int upscale[3] = {1, 1, 1}; 9 | 10 | #if BORDER_MODE == 0 11 | pad[0] = *(npy_int64 *)PyArray_GETPTR1(filt_shp, 2) - 1; 12 | pad[1] = *(npy_int64 *)PyArray_GETPTR1(filt_shp, 3) - 1; 13 | #if NB_DIMS > 2 14 | pad[2] = *(npy_int64 *)PyArray_GETPTR1(filt_shp, 4) - 1; 15 | #endif 16 | #elif BORDER_MODE == 2 17 | pad[0] = *(npy_int64 *)PyArray_GETPTR1(filt_shp, 2) / 2; 18 | pad[1] = *(npy_int64 *)PyArray_GETPTR1(filt_shp, 3) / 2; 19 | #if NB_DIMS > 2 20 | pad[2] = *(npy_int64 *)PyArray_GETPTR1(filt_shp, 4) / 2; 21 | #endif 22 | #endif 23 | 24 | if (PyArray_DIM(filt_shp, 0) - 2 != NB_DIMS) { 25 | PyErr_Format(PyExc_ValueError, "Filter shape has too many dimensions: " 26 | "expected %d, got %lld.", NB_DIMS, 27 | (long long)PyArray_DIM(filt_shp, 0)); 28 | return -1; 29 | } 30 | 31 | err = cudnnCreateConvolutionDescriptor(desc); 32 | if (err != CUDNN_STATUS_SUCCESS) { 33 | PyErr_Format(PyExc_MemoryError, "could not allocate convolution " 34 | "descriptor: %s", cudnnGetErrorString(err)); 35 | return -1; 36 | } 37 | 38 | err = cudnnSetConvolutionNdDescriptor_v3(*desc, NB_DIMS, pad, strides, 39 | upscale, CONV_MODE, PRECISION); 40 | return 0; 41 | } 42 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/tutorial/index.txt: -------------------------------------------------------------------------------- 1 | 2 | .. 
_tutorial: 3 | 4 | ======== 5 | Tutorial 6 | ======== 7 | 8 | Let us start an interactive session (e.g. with ``python`` or ``ipython``) and import Theano. 9 | 10 | >>> from theano import * 11 | 12 | Several of the symbols you will need to use are in the ``tensor`` subpackage 13 | of Theano. Let us import that subpackage under a handy name like 14 | ``T`` (the tutorials will frequently use this convention). 15 | 16 | >>> import theano.tensor as T 17 | 18 | If that succeeded you are ready for the tutorial, otherwise check your 19 | installation (see :ref:`install`). 20 | 21 | Throughout the tutorial, bear in mind that there is a :ref:`glossary` as well 22 | as *index* and *modules* links in the upper-right corner of each page to help 23 | you out. 24 | 25 | Prerequisites 26 | ------------- 27 | .. toctree:: 28 | 29 | python 30 | numpy 31 | 32 | Basics 33 | ------ 34 | 35 | .. toctree:: 36 | 37 | adding 38 | examples 39 | gradients 40 | conditions 41 | loop 42 | shape_info 43 | 44 | Advanced 45 | -------- 46 | 47 | .. toctree:: 48 | 49 | sparse 50 | using_gpu 51 | using_multi_gpu 52 | 53 | Advanced configuration and debugging 54 | ------------------------------------ 55 | 56 | .. toctree:: 57 | 58 | modes 59 | printing_drawing 60 | debug_faq 61 | nan_tutorial 62 | profiling 63 | 64 | Further readings 65 | ---------------- 66 | 67 | .. toctree:: 68 | 69 | ../extending/graphstructures 70 | loading_and_saving 71 | gpu_data_convert 72 | aliasing 73 | python-memory-management 74 | multi_cores 75 | faq_tutorial 76 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tensor/tests/test_type_other.py: -------------------------------------------------------------------------------- 1 | """ This file don't test everything. 
It only test one past crash error.""" 2 | from __future__ import absolute_import, print_function, division 3 | import theano 4 | from theano.gof import Constant 5 | from theano.tensor.type_other import MakeSlice, make_slice, NoneTypeT, NoneConst 6 | 7 | 8 | def test_make_slice_merge(): 9 | # In the past, this was crahsing during compilation. 10 | i = theano.tensor.iscalar() 11 | s1 = make_slice(0, i) 12 | s2 = make_slice(0, i) 13 | f = theano.function([i], [s1, s2]) 14 | nodes = f.maker.fgraph.apply_nodes 15 | assert len([n for n in nodes if isinstance(n.op, MakeSlice)]) == 1 16 | theano.printing.debugprint(f) 17 | 18 | 19 | def test_none_Constant(): 20 | """ Tests equals 21 | 22 | We had an error in the past with unpickling 23 | """ 24 | o1 = Constant(NoneTypeT(), None, name='NoneConst') 25 | o2 = Constant(NoneTypeT(), None, name='NoneConst') 26 | assert o1.equals(o2) 27 | assert NoneConst.equals(o1) 28 | assert o1.equals(NoneConst) 29 | assert NoneConst.equals(o2) 30 | assert o2.equals(NoneConst) 31 | 32 | # This trigger equals that returned the wrong answer in the past. 
33 | import six.moves.cPickle as pickle 34 | import theano 35 | from theano import tensor 36 | 37 | x = tensor.vector('x') 38 | y = tensor.argmax(x) 39 | kwargs = {} 40 | # We can't pickle DebugMode 41 | if theano.config.mode in ["DebugMode", "DEBUG_MODE"]: 42 | kwargs = {'mode': 'FAST_RUN'} 43 | f = theano.function([x], [y], **kwargs) 44 | pickle.loads(pickle.dumps(f)) 45 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/scan_module/numpy_api_changes.diff: -------------------------------------------------------------------------------- 1 | @@ -6667,7 +6667,7 @@ 2 | * cdef list stack 3 | * cdef int offset 4 | */ 5 | - __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); 6 | + __pyx_t_3 = ((PyObject *)PyArray_DESCR(__pyx_v_self)); 7 | __Pyx_INCREF(__pyx_t_3); 8 | __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); 9 | __pyx_t_3 = 0; 10 | @@ -8237,7 +8237,7 @@ 11 | * arr.base = baseptr 12 | * 13 | */ 14 | - Py_XDECREF(__pyx_v_arr->base); 15 | + Py_XDECREF(PyArray_BASE(__pyx_v_arr)); 16 | 17 | /* "numpy.pxd":973 18 | * baseptr = base 19 | @@ -8246,7 +8246,11 @@ 20 | * 21 | * cdef inline object get_array_base(ndarray arr): 22 | */ 23 | - __pyx_v_arr->base = __pyx_v_baseptr; 24 | + #if NPY_API_VERSION < 0x00000007 25 | + PyArray_BASE(__pyx_v_arr) = __pyx_v_baseptr; 26 | + #else 27 | + PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_baseptr); 28 | + #endif 29 | 30 | __Pyx_RefNannyFinishContext(); 31 | } 32 | @@ -8285,7 +8285,7 @@ 33 | * return None 34 | * else: 35 | */ 36 | - __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); 37 | + __pyx_t_1 = ((PyArray_BASE(__pyx_v_arr) == NULL) != 0); 38 | if (__pyx_t_1) { 39 | 40 | /* "numpy.pxd":977 41 | @@ -8307,8 +8311,8 @@ 42 | * return arr.base # <<<<<<<<<<<<<< 43 | */ 44 | __Pyx_XDECREF(__pyx_r); 45 | - __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); 46 | - __pyx_r = ((PyObject *)__pyx_v_arr->base); 47 | + __Pyx_INCREF(((PyObject *)PyArray_BASE(__pyx_v_arr))); 48 | + 
__pyx_r = ((PyObject *)PyArray_BASE(__pyx_v_arr)); 49 | goto __pyx_L0; 50 | } 51 | __pyx_L3:; 52 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/acknowledgement.txt: -------------------------------------------------------------------------------- 1 | .. _acknowledgement: 2 | 3 | 4 | Acknowledgements 5 | ================ 6 | 7 | .. note: 8 | 9 | This page is in construction. We are missing sources. 10 | 11 | 12 | * The developers of `NumPy `_. Theano is based on its ndarray object and uses much of its implementation. 13 | * The developers of `SciPy `_. Our sparse matrix support uses their sparse matrix objects. We also reuse other parts. 14 | * All `Theano contributors `_. 15 | * All Theano users that have given us feedback. 16 | * The GPU implementation of tensordot is based on code from Tijmen 17 | Tieleman's `gnumpy `_ 18 | * The original version of the function ``cpuCount()`` in the file 19 | `theano/misc/cpucount.py` come from the project `pyprocessing 20 | `_. It is available under the same license 21 | as Theano. 22 | * Our random number generator implementation on CPU and GPU uses the MRG31k3p algorithm that is described in: 23 | 24 | P. L'Ecuyer and R. Touzin, `Fast Combined Multiple Recursive Generators with Multipliers of the form a = +/- 2^d +/- 2^e `_, Proceedings of the 2000 Winter Simulation Conference, Dec. 2000, 683--689. 25 | 26 | We were authorized by Pierre L'Ecuyer to copy/modify his Java implementation in the `SSJ `_ software and to relicense it under BSD 3-Clauses in Theano. 27 | * A better GPU memory allocator :attr:`CNMeM ` is included in Theano. It has the same license. 
28 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/extending/tips.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | ==== 4 | Tips 5 | ==== 6 | 7 | 8 | Reusing outputs 9 | =============== 10 | 11 | WRITEME 12 | 13 | 14 | Don't define new Ops unless you have to 15 | ======================================= 16 | 17 | It is usually not useful to define Ops that can be easily 18 | implemented using other already existing Ops. For example, instead of 19 | writing a "sum_square_difference" Op, you should probably just write a 20 | simple function: 21 | 22 | .. testcode:: 23 | 24 | from theano import tensor as T 25 | 26 | def sum_square_difference(a, b): 27 | return T.sum((a - b)**2) 28 | 29 | Even without taking Theano's optimizations into account, it is likely 30 | to work just as well as a custom implementation. It also supports all 31 | data types, tensors of all dimensions as well as broadcasting, whereas 32 | a custom implementation would probably only bother to support 33 | contiguous vectors/matrices of doubles... 34 | 35 | 36 | Use Theano's high order Ops when applicable 37 | =========================================== 38 | 39 | Theano provides some generic Op classes which allow you to generate a 40 | lot of Ops at a lesser effort. For instance, Elemwise can be used to 41 | make :term:`elementwise` operations easily whereas DimShuffle can be 42 | used to make transpose-like transformations. These higher order Ops 43 | are mostly Tensor-related, as this is Theano's specialty. 44 | 45 | 46 | .. _opchecklist: 47 | 48 | Op Checklist 49 | ============ 50 | 51 | Use this list to make sure you haven't forgotten anything when 52 | defining a new Op. It might not be exhaustive but it covers a lot of 53 | common mistakes. 
54 | 55 | WRITEME 56 | 57 | 58 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/tests/test_config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Test config options. 3 | """ 4 | from __future__ import absolute_import, print_function, division 5 | import unittest 6 | from theano.configparser import AddConfigVar, ConfigParam, THEANO_FLAGS_DICT 7 | 8 | 9 | class T_config(unittest.TestCase): 10 | 11 | def test_invalid_default(self): 12 | # Ensure an invalid default value found in the Theano code only causes 13 | # a crash if it is not overridden by the user. 14 | 15 | def filter(val): 16 | if val == 'invalid': 17 | raise ValueError() 18 | else: 19 | return val 20 | 21 | try: 22 | # This should raise a ValueError because the default value is 23 | # invalid. 24 | AddConfigVar( 25 | 'T_config.test_invalid_default_a', 26 | doc='unittest', 27 | configparam=ConfigParam('invalid', filter=filter), 28 | in_c_key=False) 29 | assert False 30 | except ValueError: 31 | pass 32 | 33 | THEANO_FLAGS_DICT['T_config.test_invalid_default_b'] = 'ok' 34 | # This should succeed since we defined a proper value, even 35 | # though the default was invalid. 36 | AddConfigVar('T_config.test_invalid_default_b', 37 | doc='unittest', 38 | configparam=ConfigParam('invalid', filter=filter), 39 | in_c_key=False) 40 | 41 | # Check that the flag has been removed 42 | assert 'T_config.test_invalid_default_b' not in THEANO_FLAGS_DICT 43 | 44 | # TODO We should remove these dummy options on test exit. 
45 | -------------------------------------------------------------------------------- /software/enas/src/ptb/data_utils.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import numpy as np 3 | import tensorflow as tf 4 | 5 | 6 | def ptb_input_producer(raw_data, batch_size, num_steps, shuffle=False, 7 | randomize=False): 8 | """ 9 | Args: 10 | raw_data: np tensor of size [num_words]. 11 | batch_size: self-explained. 12 | num_steps: number of BPTT steps. 13 | """ 14 | 15 | num_batches_per_epoch = ((np.size(raw_data) // batch_size) - 1) // num_steps 16 | raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32) 17 | 18 | data_len = tf.size(raw_data) 19 | batch_len = data_len // batch_size 20 | data = tf.reshape(raw_data[0 : batch_size * batch_len], 21 | [batch_size, batch_len]) 22 | 23 | epoch_size = (batch_len - 1) // num_steps 24 | with tf.device("/cpu:0"): 25 | epoch_size = tf.identity(epoch_size, name="epoch_size") 26 | 27 | if randomize: 28 | i = tf.random_uniform([1], minval=0, maxval=batch_len - num_steps, 29 | dtype=tf.int32) 30 | i = tf.reduce_sum(i) 31 | x = tf.strided_slice( 32 | data, [0, i], [batch_size, i + num_steps]) 33 | y = tf.strided_slice( 34 | data, [0, i + 1], [batch_size, i + num_steps + 1]) 35 | else: 36 | i = tf.train.range_input_producer(epoch_size, shuffle=shuffle).dequeue() 37 | x = tf.strided_slice( 38 | data, [0, i * num_steps], [batch_size, (i + 1) * num_steps]) 39 | y = tf.strided_slice( 40 | data, [0, i * num_steps + 1], [batch_size, (i + 1) * num_steps + 1]) 41 | 42 | x.set_shape([batch_size, num_steps]) 43 | y.set_shape([batch_size, num_steps]) 44 | 45 | return x, y, num_batches_per_epoch 46 | 47 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/compile/tests/test_nanguardmode.py: -------------------------------------------------------------------------------- 1 | """ 2 | This test is for 
testing the NanGuardMode. 3 | """ 4 | from __future__ import absolute_import, print_function, division 5 | 6 | import logging 7 | from nose.tools import assert_raises 8 | 9 | import numpy 10 | 11 | from theano.compile.nanguardmode import NanGuardMode 12 | import theano 13 | import theano.tensor as T 14 | 15 | 16 | def test_NanGuardMode(): 17 | """ 18 | Tests if NanGuardMode is working by feeding in numpy.inf and numpy.nans 19 | intentionally. A working implementation should be able to capture all 20 | the abnormalties. 21 | """ 22 | x = T.matrix() 23 | w = theano.shared(numpy.random.randn(5, 7).astype(theano.config.floatX)) 24 | y = T.dot(x, w) 25 | 26 | fun = theano.function( 27 | [x], y, 28 | mode=NanGuardMode(nan_is_error=True, inf_is_error=True) 29 | ) 30 | a = numpy.random.randn(3, 5).astype(theano.config.floatX) 31 | infa = numpy.tile( 32 | (numpy.asarray(100.) ** 1000000).astype(theano.config.floatX), (3, 5)) 33 | nana = numpy.tile( 34 | numpy.asarray(numpy.nan).astype(theano.config.floatX), (3, 5)) 35 | biga = numpy.tile( 36 | numpy.asarray(1e20).astype(theano.config.floatX), (3, 5)) 37 | 38 | fun(a) # normal values 39 | 40 | # Temporarily silence logger 41 | _logger = logging.getLogger("theano.compile.nanguardmode") 42 | try: 43 | _logger.propagate = False 44 | assert_raises(AssertionError, fun, infa) # INFs 45 | assert_raises(AssertionError, fun, nana) # NANs 46 | assert_raises(AssertionError, fun, biga) # big values 47 | finally: 48 | _logger.propagate = True 49 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/core_development_guide.txt: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | Core Development Guide 4 | ======================= 5 | 6 | The documentation of the core components of Theano is still a work in 7 | progress. 
For now this is a list of bits and pieces on the subject, 8 | some of them might be outdated though: 9 | 10 | 11 | * :ref:`theano_type` -- Tutorial for writing a new type in Theano. It 12 | introduces the basics concerning Theano datatypes. 13 | 14 | * :ref:`theano_ctype` -- Tutorial on how to make your type C-friendly. 15 | 16 | * :ref:`views_and_inplace` -- This is somewhere between extending Theano and 17 | describing how Theano works internally; it talks about views and inplace 18 | operations. 19 | 20 | * :ref:`optimization` -- Tutorial on how optimization work in Theano. 21 | 22 | * :ref:`pipeline` -- Describes the steps of compiling a Theano Function. 23 | 24 | * :ref:`graphstructures` -- Describes the symbolic graphs generated by 25 | :mod:`theano.scan`. 26 | 27 | * :ref:`unittest` -- Tutorial on how to use unittest in testing Theano. 28 | 29 | * :ref:`sandbox_broadcasting` -- Short description of what a broadcastable 30 | pattern is. 31 | 32 | * :ref:`sandbox_debugging_step_mode` -- How to step through the execution of 33 | a Theano function and print the inputs and outputs of each op. 34 | 35 | * :ref:`sandbox_elemwise` -- Description of element wise operations. 36 | 37 | * :ref:`sandbox_maxgotcha` -- Describes the difference between ``numpy.max`` 38 | and Python max (something to consider when using max). 39 | 40 | * :ref:`sandbox_randnb` -- Description of how Theano deals with random 41 | numbers. 42 | 43 | * :ref:`sparse` -- Description of the ``sparse`` type in Theano. 
44 | 45 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/README.txt: -------------------------------------------------------------------------------- 1 | To install the package, see this page: 2 | 3 | http://deeplearning.net/software/theano/install.html 4 | 5 | For the documentation, see the project website: 6 | 7 | http://deeplearning.net/software/theano/ 8 | 9 | Related Projects: 10 | 11 | https://github.com/Theano/Theano/wiki/Related-projects 12 | 13 | We recommend you look at the documentation on the website, since it 14 | will be more current than the documentation included with the package. 15 | If you really wish to build the documentation yourself, you will need 16 | sphinx. Issue the following command: 17 | 18 | python ./doc/scripts/docgen.py 19 | 20 | Documentation is built into html/ 21 | The PDF of the documentation is html/theano.pdf 22 | 23 | 24 | DIRECTORY LAYOUT 25 | 26 | Theano (current directory) is the distribution directory. 27 | * Theano/theano contains the package 28 | * Theano/theano has several submodules: 29 | * gof + compile are the core 30 | * scalar depends upon core 31 | * tensor depends upon scalar 32 | * sparse depends upon tensor 33 | * sandbox can depend on everything else 34 | * Theano/examples are copies of the example on the wiki 35 | * Theano/benchmark and Theano/examples are in the distribution, but not in 36 | the Python package 37 | * Theano/bin contains executable scripts that are copied to the bin folder 38 | when the Python package is installed 39 | * Tests are distributed and are part of the package, i.e. 
fall in 40 | the appropriate submodules 41 | * Theano/doc contains files and scripts used to generate the documentation 42 | * Theano/html is the place where the documentation will be generated 43 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/gof/tests/test_optdb.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function, division 2 | from unittest import TestCase 3 | 4 | from theano.compat import exc_message 5 | from theano.gof.optdb import opt, DB 6 | 7 | 8 | class Test_DB(TestCase): 9 | 10 | def test_0(self): 11 | 12 | class Opt(opt.Optimizer): # inheritance buys __hash__ 13 | name = 'blah' 14 | 15 | db = DB() 16 | db.register('a', Opt()) 17 | 18 | db.register('b', Opt()) 19 | 20 | db.register('c', Opt(), 'z', 'asdf') 21 | 22 | self.assertTrue('a' in db) 23 | self.assertTrue('b' in db) 24 | self.assertTrue('c' in db) 25 | 26 | try: 27 | db.register('c', Opt()) # name taken 28 | self.fail() 29 | except ValueError as e: 30 | if exc_message(e).startswith("The name"): 31 | pass 32 | else: 33 | raise 34 | except Exception: 35 | self.fail() 36 | 37 | try: 38 | db.register('z', Opt()) # name collides with tag 39 | self.fail() 40 | except ValueError as e: 41 | if exc_message(e).startswith("The name"): 42 | pass 43 | else: 44 | raise 45 | except Exception: 46 | self.fail() 47 | 48 | try: 49 | db.register('u', Opt(), 'b') # name new but tag collides with name 50 | self.fail() 51 | except ValueError as e: 52 | if exc_message(e).startswith("The tag"): 53 | pass 54 | else: 55 | raise 56 | except Exception: 57 | self.fail() 58 | -------------------------------------------------------------------------------- /software/enas/src/ptb/ptb_ops.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from tensorflow.python.training 
import moving_averages 4 | 5 | from src.common_ops import create_weight 6 | from src.common_ops import create_bias 7 | 8 | def layer_norm(x, is_training, name="layer_norm"): 9 | x = tf.contrib.layers.layer_norm(x, scope=name, 10 | reuse=None if is_training else True) 11 | return x 12 | 13 | def batch_norm(x, is_training, name="batch_norm", decay=0.999, epsilon=1.0): 14 | shape = x.get_shape()[1] 15 | with tf.variable_scope(name, reuse=None if is_training else True): 16 | offset = tf.get_variable( 17 | "offset", shape, 18 | initializer=tf.constant_initializer(0.0, dtype=tf.float32)) 19 | scale = tf.get_variable( 20 | "scale", shape, 21 | initializer=tf.constant_initializer(1.0, dtype=tf.float32)) 22 | moving_mean = tf.get_variable( 23 | "moving_mean", shape, trainable=False, 24 | initializer=tf.constant_initializer(0.0, dtype=tf.float32)) 25 | moving_variance = tf.get_variable( 26 | "moving_variance", shape, trainable=False, 27 | initializer=tf.constant_initializer(1.0, dtype=tf.float32)) 28 | 29 | if is_training: 30 | mean, variance = tf.nn.moments(x, [0]) 31 | update_mean = moving_averages.assign_moving_average( 32 | moving_mean, mean, decay) 33 | update_variance = moving_averages.assign_moving_average( 34 | moving_variance, variance, decay) 35 | 36 | with tf.control_dependencies([update_mean, update_variance]): 37 | x = scale * (x - mean) / tf.sqrt(epsilon + variance) + offset 38 | else: 39 | x = scale * (x - moving_mean) / tf.sqrt(epsilon + moving_variance) + offset 40 | return x 41 | 42 | 43 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/d3viz/css/d3viz.css: -------------------------------------------------------------------------------- 1 | svg { 2 | margin-left:auto; 3 | margin-right:auto; 4 | display:block; 5 | position: fixed; 6 | border: 0px solid black; 7 | top: 32px; left:0%; right:0%; bottom:0%; 8 | } 9 | 10 | .menuBar { 11 | border-bottom: 1px solid black; 12 | height: 
22px; 13 | } 14 | 15 | input.menuBar { 16 | } 17 | 18 | .nodeRect { 19 | stroke: black; 20 | border: 3px solid black; 21 | } 22 | 23 | .nodeEllipse { 24 | stroke: black; 25 | border: 3px solid black; 26 | } 27 | 28 | .nodeText { 29 | color: black; 30 | } 31 | 32 | .edge { 33 | stroke-width: 3px; 34 | cursor: pointer; 35 | opacity: 0.4; 36 | } 37 | 38 | .edgeLabelRect { 39 | stroke: black; 40 | border: 1px solid black; 41 | fill: skyblue; 42 | opacity: 0.9; 43 | } 44 | 45 | .edgeLabelText { 46 | fill: black; 47 | text-anchor: start; 48 | } 49 | 50 | .arrowHead { 51 | stroke: green; 52 | stroke-width: 1px; 53 | } 54 | 55 | .arrowHead_n { 56 | stroke: green; 57 | } 58 | 59 | .arrowHead_r { 60 | stroke-width: 3px; 61 | fill: red; 62 | stroke: red; 63 | } 64 | 65 | .arrowHead_b { 66 | stroke: dodgerblue; 67 | } 68 | 69 | .edgeTooltip { 70 | position: absolute; 71 | text-align: center; 72 | vertical-align: middle; 73 | min-width: 10px; 74 | min-height: 10px; 75 | padding: 5px; 76 | background: lightsteelblue; 77 | border: 1px solid black; 78 | border-radius: 8px; 79 | pointer-events: none; 80 | } 81 | 82 | .nodeInfo { 83 | position: absolute; 84 | text-align: left; 85 | vertical-align: middle; 86 | min-width: 10px; 87 | min-height: 10px; 88 | padding: 5px; 89 | background: lightsteelblue; 90 | border: 1px solid black; 91 | border-radius: 8px; 92 | pointer-events: none; 93 | } 94 | 95 | path.hull { 96 | fill: lightsteelblue; 97 | fill-opacity: 0.3; 98 | } -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/theano/misc/doubleop.py: -------------------------------------------------------------------------------- 1 | # This is the example in the Theano/doc/tutorial/extending_theano.txt 2 | from __future__ import absolute_import, print_function, division 3 | 4 | import theano 5 | 6 | 7 | class DoubleOp(theano.Op): 8 | """ Double each element of a tensor. 9 | 10 | :param x: input tensor. 
11 | 12 | :return: a tensor of the same shape and dtype as the input with all 13 | values doubled. 14 | 15 | :note: 16 | this is a test note 17 | 18 | :seealso: 19 | You can use the elemwise op to replace this example. 20 | Just execute `x * 2` with x being a Theano variable. 21 | 22 | .. versionadded:: 0.6 23 | """ 24 | def __eq__(self, other): 25 | return type(self) == type(other) 26 | 27 | def __hash__(self): 28 | return hash(type(self)) 29 | 30 | def __str__(self): 31 | return self.__class__.__name__ 32 | 33 | def make_node(self, x): 34 | x = theano.tensor.as_tensor_variable(x) 35 | return theano.Apply(self, [x], [x.type()]) 36 | 37 | def perform(self, node, inputs, output_storage): 38 | x = inputs[0] 39 | z = output_storage[0] 40 | z[0] = x * 2 41 | 42 | def infer_shape(self, node, i0_shapes): 43 | return i0_shapes 44 | 45 | def grad(self, inputs, output_grads): 46 | return [output_grads[0] * 2] 47 | 48 | def R_op(self, inputs, eval_points): 49 | # R_op can receive None as eval_points. 50 | # That means there is no differentiable path through that input. 51 | # If this implies that you cannot compute some outputs, 52 | # return None for those. 
53 | if eval_points[0] is None: 54 | return eval_points 55 | return self.grad(inputs, eval_points) 56 | -------------------------------------------------------------------------------- /bayesian_optimization/Theano-master/doc/library/d3viz/examples/d3viz/css/d3viz.css: -------------------------------------------------------------------------------- 1 | svg { 2 | margin-left:auto; 3 | margin-right:auto; 4 | display:block; 5 | position: fixed; 6 | border: 0px solid black; 7 | top: 32px; left:0%; right:0%; bottom:0%; 8 | } 9 | 10 | .menuBar { 11 | border-bottom: 1px solid black; 12 | height: 22px; 13 | } 14 | 15 | input.menuBar { 16 | } 17 | 18 | .nodeRect { 19 | stroke: black; 20 | border: 3px solid black; 21 | } 22 | 23 | .nodeEllipse { 24 | stroke: black; 25 | border: 3px solid black; 26 | } 27 | 28 | .nodeText { 29 | color: black; 30 | } 31 | 32 | .edge { 33 | stroke-width: 3px; 34 | cursor: pointer; 35 | opacity: 0.4; 36 | } 37 | 38 | .edgeLabelRect { 39 | stroke: black; 40 | border: 1px solid black; 41 | fill: skyblue; 42 | opacity: 0.9; 43 | } 44 | 45 | .edgeLabelText { 46 | fill: black; 47 | text-anchor: start; 48 | } 49 | 50 | .arrowHead { 51 | stroke: green; 52 | stroke-width: 1px; 53 | } 54 | 55 | .arrowHead_n { 56 | stroke: green; 57 | } 58 | 59 | .arrowHead_r { 60 | stroke-width: 3px; 61 | fill: red; 62 | stroke: red; 63 | } 64 | 65 | .arrowHead_b { 66 | stroke: dodgerblue; 67 | } 68 | 69 | .edgeTooltip { 70 | position: absolute; 71 | text-align: center; 72 | vertical-align: middle; 73 | min-width: 10px; 74 | min-height: 10px; 75 | padding: 5px; 76 | background: lightsteelblue; 77 | border: 1px solid black; 78 | border-radius: 8px; 79 | pointer-events: none; 80 | } 81 | 82 | .nodeInfo { 83 | position: absolute; 84 | text-align: left; 85 | vertical-align: middle; 86 | min-width: 10px; 87 | min-height: 10px; 88 | padding: 5px; 89 | background: lightsteelblue; 90 | border: 1px solid black; 91 | border-radius: 8px; 92 | pointer-events: none; 93 | } 94 | 95 | 
path.hull {
    fill: lightsteelblue;
    fill-opacity: 0.3;
}
--------------------------------------------------------------------------------
/bayesian_optimization/Theano-master/theano/sandbox/gpuarray/tests/test_nerv.py:
--------------------------------------------------------------------------------
"""Tests for the float16 ``Gemm16`` op supplied by the nervanagpu backend.

Both tests skip when the optional ``nervanagpu`` package is unavailable
(``nerv is None``).
"""
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest

import numpy

from theano import function
from theano.tests import unittest_tools as utt
from theano.tensor import vector, matrix, dot

from .config import mode_with_gpu
from ..nerv import Gemm16, nerv


def test_gemm16_swap():
    """Check that the optimizer substitutes Gemm16 only for float16
    matrix-by-matrix dots, never for vector-matrix or float32 operands."""
    if nerv is None:
        raise SkipTest("nervanagpu not available")
    v = vector(dtype='float16')
    m = matrix(dtype='float16')
    m2 = matrix(dtype='float16')
    m32 = matrix(dtype='float32')

    # test that we don't try to replace anything but matrix x matrix in float16
    f = function([v, m], dot(v, m), mode=mode_with_gpu)
    assert len([node for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, Gemm16)]) == 0
    f = function([m32, m], dot(m32, m), mode=mode_with_gpu)
    assert len([node for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, Gemm16)]) == 0

    # float16 matrix x float16 matrix is the one case Gemm16 should handle
    f = function([m, m2], dot(m, m2), mode=mode_with_gpu)
    assert len([node for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, Gemm16)]) == 1


def test_gemm16_value():
    """Numerical check: the compiled Gemm16 dot must match numpy.dot
    (within the tolerance chosen by assert_allclose) on float16 inputs."""
    if nerv is None:
        raise SkipTest("nervanagpu not available")
    m = matrix(dtype='float16')
    m2 = matrix(dtype='float16')

    f = function([m, m2], dot(m, m2), mode=mode_with_gpu)

    v1 = numpy.random.random((3, 4)).astype('float16')
    v2 = numpy.random.random((4, 2)).astype('float16')

    of = f(v1, v2)
    on = numpy.dot(v1, v2)

    utt.assert_allclose(of, on)
--------------------------------------------------------------------------------
/bayesian_optimization/Theano-master/theano/d3viz/tests/test_d3viz.py:
--------------------------------------------------------------------------------
"""Smoke tests for ``theano.d3viz``: render several fixture graphs to HTML
and verify that a non-empty output file is produced.

Skipped entirely when pydot (a d3viz requirement) is not importable.
"""
from __future__ import absolute_import, print_function, division

import numpy as np
import os.path as pt
import tempfile
import unittest
import filecmp

import theano as th
import theano.d3viz as d3v
from theano.d3viz.tests import models

from nose.plugins.skip import SkipTest
from theano.d3viz.formatting import pydot_imported
if not pydot_imported:
    raise SkipTest('Missing requirements')


class TestD3Viz(unittest.TestCase):
    """Render fixture models (see ``models.py``) with d3viz."""

    def setUp(self):
        # Fixed seed so profiled runs are deterministic across test runs.
        self.rng = np.random.RandomState(0)
        self.data_dir = pt.join('data', 'test_d3viz')

    def check(self, f, reference=None, verbose=False):
        """Render compiled function ``f`` to a temp HTML file.

        Asserts the file is non-empty; when ``reference`` is given, also
        asserts the output is byte-identical to that reference file.
        """
        tmp_dir = tempfile.mkdtemp()
        html_file = pt.join(tmp_dir, 'index.html')
        if verbose:
            print(html_file)
        d3v.d3viz(f, html_file)
        assert pt.getsize(html_file) > 0
        if reference:
            assert filecmp.cmp(html_file, reference)

    def test_mlp(self):
        m = models.Mlp()
        f = th.function(m.inputs, m.outputs)
        self.check(f)

    def test_mlp_profiled(self):
        # Call the function once so timing data is attached to the graph
        # before rendering.
        m = models.Mlp()
        profile = th.compile.profiling.ProfileStats(False)
        f = th.function(m.inputs, m.outputs, profile=profile)
        x_val = self.rng.normal(0, 1, (1000, m.nfeatures))
        f(x_val)
        self.check(f)

    def test_ofg(self):
        m = models.Ofg()
        f = th.function(m.inputs, m.outputs)
        self.check(f)

    def test_ofg_nested(self):
        m = models.OfgNested()
        f = th.function(m.inputs, m.outputs)
        self.check(f)
--------------------------------------------------------------------------------
/bayesian_optimization/Theano-master/theano/d3viz/tests/models.py:
--------------------------------------------------------------------------------
"""Fixture graphs used by the d3viz tests (see ``test_d3viz.py``)."""
from __future__ import absolute_import, print_function, division
import numpy as np

from six import integer_types
import theano as th
import theano.tensor as T


class Mlp(object):
    """One-hidden-layer perceptron (sigmoid hidden, softmax output).

    Builds the symbolic graph only; exposes it via ``inputs``/``outputs``.
    """

    def __init__(self, nfeatures=100, noutputs=10, nhiddens=50, rng=None):
        # `rng` may be None (defaults to seed 0), an int seed, or a
        # ready-made RandomState instance.
        if rng is None:
            rng = 0
        if isinstance(rng, integer_types):
            rng = np.random.RandomState(rng)
        self.rng = rng
        self.nfeatures = nfeatures
        self.noutputs = noutputs
        self.nhiddens = nhiddens

        x = T.dmatrix('x')
        wh = th.shared(self.rng.normal(0, 1, (nfeatures, nhiddens)),
                       borrow=True)
        bh = th.shared(np.zeros(nhiddens), borrow=True)
        h = T.nnet.sigmoid(T.dot(x, wh) + bh)

        # NOTE(review): `wy` is the only shared variable created without
        # borrow=True -- looks unintentional; confirm before relying on it.
        wy = th.shared(self.rng.normal(0, 1, (nhiddens, noutputs)))
        by = th.shared(np.zeros(noutputs), borrow=True)
        y = T.nnet.softmax(T.dot(h, wy) + by)

        # Symbolic interface consumed by the d3viz tests.
        self.inputs = [x]
        self.outputs = [y]


class OfgNested(object):
    """Graph with one OpFromGraph nested inside another."""

    def __init__(self):
        x, y, z = T.scalars('xyz')
        e = x * y
        op = th.OpFromGraph([x, y], [e])
        # op2's internal graph itself applies op, giving the nesting.
        e2 = op(x, y) + z
        op2 = th.OpFromGraph([x, y, z], [e2])
        e3 = op2(x, y, z) + z

        self.inputs = [x, y, z]
        self.outputs = [e3]


class Ofg(object):
    """Graph applying the same OpFromGraph twice with permuted inputs."""

    def __init__(self):
        x, y, z = T.scalars('xyz')
        e = T.nnet.sigmoid((x + y + z)**2)
        op = th.OpFromGraph([x, y, z], [e])
        # Two applications of the same op node with reordered arguments.
        e2 = op(x, y, z) + op(z, y, x)

        self.inputs = [x, y, z]
        self.outputs = [e2]
--------------------------------------------------------------------------------