├── .github ├── CONTRIBUTING.md ├── ISSUE_TEMPLATE │ ├── bug-report.md │ └── feature-request.md └── SUPPORT.md ├── .gitignore ├── GP ├── BasicInterfaces.py ├── DKL │ ├── README.md │ ├── __init__.py │ ├── collect_data.py │ ├── dknet │ │ ├── __init__.py │ │ ├── layers │ │ │ ├── __init__.py │ │ │ ├── activation.py │ │ │ ├── convolutional.py │ │ │ ├── dense.py │ │ │ ├── dropout.py │ │ │ ├── layer.py │ │ │ ├── pooling.py │ │ │ └── reshape.py │ │ ├── loss.py │ │ ├── models.py │ │ ├── optimizers.py │ │ └── utils.py │ ├── example.py │ └── quadtest.py ├── DKLmodel.py ├── GP.py ├── GP_utils.py ├── GPexample.py ├── GPtools.py ├── OnlineGP.beforeReindent.py ├── OnlineGP.py ├── SPGPmodel.py ├── __init__.py ├── bayes_optimization.py ├── chaospy_sequences.py ├── discrete_test_script.py ├── example_script.py ├── extractGP.py ├── gaussian_process.py ├── gaussian_process_sklearn.py ├── heatmap.py ├── lcls_opt_script.py ├── minimize.py ├── parallelbasinhopping.py └── parallelstuff.py ├── LICENSE ├── RCDS ├── rcdsClass.py └── test1.py ├── README.md ├── UIOcelotInterface_gen.py ├── UIOcelotInterface_gen.ui ├── __init__.py ├── cli_examples └── optimization_sequence.py ├── docs ├── Makefile ├── _build │ ├── doctrees │ │ ├── environment.pickle │ │ ├── index.doctree │ │ ├── intro.doctree │ │ ├── modules.doctree │ │ ├── optimizer.GP.doctree │ │ ├── optimizer.doctree │ │ ├── optimizer.mint.doctree │ │ ├── optimizer.resetpanel.doctree │ │ ├── status.doctree │ │ ├── structure.doctree │ │ └── usage.doctree │ └── html │ │ ├── .buildinfo │ │ ├── _images │ │ ├── ocelot_dev_panel.jpg │ │ ├── ocelot_options.png │ │ ├── ocelot_plots.png │ │ ├── ocelot_resetpanel.png │ │ ├── ocelot_savefile.png │ │ └── ocelot_scan.png │ │ ├── _modules │ │ ├── index.html │ │ └── optimizer │ │ │ ├── GP │ │ │ ├── OnlineGP.html │ │ │ └── bayes_optimization.html │ │ │ ├── UIOcelotInterface_gen.html │ │ │ ├── generic_optim.html │ │ │ ├── gui_main.html │ │ │ ├── mint │ │ │ ├── lcls_interface.html │ │ │ ├── mint.html │ │ │ ├── obj_function.html │ │ │ ├── opt_objects.html │ │ │ └── xfel_interface.html │ │ │ └── resetpanel │ │ │ ├── UIresetpanel.html │ │ │ ├── resetpanel.html │ │ │ └── resetpanelbox.html │ │ ├── _sources │ │ ├── index.rst.txt │ │ ├── intro.rst.txt │ │ ├── modules.rst.txt │ │ ├── optimizer.GP.rst.txt │ │ ├── optimizer.mint.rst.txt │ │ ├── optimizer.resetpanel.rst.txt │ │ ├── optimizer.rst.txt │ │ ├── status.rst.txt │ │ ├── structure.rst.txt │ │ └── usage.rst.txt │ │ ├── _static │ │ ├── ajax-loader.gif │ │ ├── basic.css │ │ ├── classic.css │ │ ├── comment-bright.png │ │ ├── comment-close.png │ │ ├── comment.png │ │ ├── default.css │ │ ├── doctools.js │ │ ├── down-pressed.png │ │ ├── down.png │ │ ├── file.png │ │ ├── jquery-3.1.0.js │ │ ├── jquery.js │ │ ├── minus.png │ │ ├── plus.png │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── sidebar.js │ │ ├── underscore-1.3.1.js │ │ ├── underscore.js │ │ ├── up-pressed.png │ │ ├── up.png │ │ └── websupport.js │ │ ├── genindex.html │ │ ├── index.html │ │ ├── intro.html │ │ ├── modules.html │ │ ├── objects.inv │ │ ├── optimizer.GP.html │ │ ├── optimizer.html │ │ ├── optimizer.mint.html │ │ ├── optimizer.resetpanel.html │ │ ├── py-modindex.html │ │ ├── search.html │ │ ├── searchindex.js │ │ ├── status.html │ │ ├── structure.html │ │ └── usage.html ├── help.txt ├── make.bat ├── readme │ ├── ocelot_anim.gif │ └── ocelot_schematic.png └── source │ ├── conf.py │ ├── images │ ├── ocelot_dev_panel.jpg │ ├── ocelot_options.png │ ├── ocelot_plots.png │ ├── ocelot_resetpanel.png │ ├── ocelot_savefile.png 
│ ├── ocelot_scan.png │ ├── ocelot_tab2.png │ └── ocelot_tab3.png │ ├── index.rst │ ├── intro.rst │ ├── modules.rst │ ├── optimizer.GP.rst │ ├── optimizer.mint.rst │ ├── optimizer.resetpanel.rst │ ├── optimizer.rst │ ├── status.rst │ ├── structure.rst │ └── usage.rst ├── generic_optim.py ├── gui_main.py ├── matrixmodel ├── __init__.py ├── archive_stuff.py ├── beamconfig.py ├── fullmachine_rmats ├── genesis_tools.py └── parallelstuff.py ├── microwave_timer.wav ├── mint ├── __init__.py ├── bessy │ ├── __init__.py │ ├── bessy_interface.py │ └── bessy_obj_function.py ├── demo │ ├── __init__.py │ └── demo_interface.py ├── flash │ ├── __init__.py │ ├── flash_interface.py │ └── flash_obj_function.py ├── hipa │ ├── __init__.py │ └── hipa_interface.py ├── lcls │ ├── __init__.py │ ├── lcls_devices.py │ ├── lcls_interface.py │ ├── lcls_obj_function.py │ └── simlog.py ├── linac4 │ ├── linac4_interface.py │ └── linac4_obj_function.py ├── mint.py ├── normscales.py ├── opt_objects.py ├── petra │ ├── __init__.py │ ├── petra_interface.py │ └── petra_obj_function.py ├── sls │ ├── __init__.py │ └── sls_interface.py ├── spear │ ├── __init__.py │ ├── simlog.py │ ├── spear_devices.py │ ├── spear_interface.py │ └── spear_obj_function.py ├── swissfel │ ├── __init__.py │ └── sf_interface.py └── xfel │ ├── __init__.py │ ├── xfel_interface.py │ └── xfel_obj_function.py ├── ocelot.png ├── op_methods ├── __init__.py ├── cobyla.py ├── custom_minimizer.py ├── es.py ├── gp_gpy.py ├── gp_sklearn.py ├── gp_slac.py ├── powell.py ├── rcds.py └── simplex.py ├── parameters ├── __init__.py ├── default.json ├── fit_params.pkl ├── fit_params_2017-05_to_2018-01.pkl ├── fit_params_2018-01_to_2018-01.pkl ├── fit_params_august.pkl ├── hype3.npy ├── hyperparameters.npy ├── lcls │ ├── default.json │ └── lion.pvs ├── lclsparams.txt ├── multinormal │ └── default.json ├── simSeed.mat ├── spear │ ├── default.json │ └── spear_hyperparams.pkl └── test.db ├── resetpanel ├── UIareyousure.ui ├── UIresetpanel.py ├── UIresetpanel.ui ├── __init__.py ├── resetpanel.py ├── resetpanelbox.py └── style.css ├── sint ├── __init__.py └── multinormal │ ├── __init__.py │ ├── multinormal_devices.py │ ├── multinormal_interface.py │ ├── multinormal_obj_function.py │ └── multinormal_ui.py ├── sound_off.svg ├── sound_on.svg ├── stats ├── __init__.py └── stats.py ├── style.css └── utils ├── __init__.py └── db.py /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ## Getting Started 4 | 5 | * Make sure you have a [GitHub account](https://github.com/signup/free) 6 | * Submit a ticket for your issue, assuming one does not already exist. 7 | * Clearly describe the issue including steps to reproduce when it is a bug. 8 | * Make sure you fill in the earliest version that you know has the issue. 9 | * Fork the repository on GitHub 10 | 11 | 12 | ## Making Changes 13 | 14 | * Create a topic branch from where you want to base your work. 15 | * This is usually the master branch. 16 | * Only target release branches if you are certain your fix must be on that 17 | branch. 18 | * To quickly create a topic branch based on master; `git checkout -b 19 | fix/master/my_contribution master`. Please avoid working directly on the 20 | `master` branch. 21 | * Make commits of logical units. 22 | * Check for unnecessary whitespace with `git diff --check` before committing. 
23 | * Make sure your commit messages are in the proper format (see below) 24 | * Make sure you have added the necessary tests for your changes. 25 | * Run _all_ the tests to assure nothing else was accidentally broken. 26 | 27 | ### Writing the commit message 28 | 29 | Commit messages should be clear and follow a few basic rules. Example: 30 | 31 | ``` 32 | ENH: add functionality X to optimizer.. 33 | 34 | The first line of the commit message starts with a capitalized acronym 35 | (options listed below) indicating what type of commit this is. Then a blank 36 | line, then more text if needed. Lines shouldn't be longer than 72 37 | characters. If the commit is related to a ticket, indicate that with 38 | "See #3456", "See ticket 3456", "Closes #3456" or similar. 39 | ``` 40 | 41 | Describing the motivation for a change, the nature of a bug for bug fixes 42 | or some details on what an enhancement does are also good to include in a 43 | commit message. Messages should be understandable without looking at the code 44 | changes. 45 | 46 | Standard acronyms to start the commit message with are: 47 | 48 | 49 | |Code| Description | 50 | |----|----------------------------------------------------| 51 | |API | an (incompatible) API change | 52 | |BLD | change related to building | 53 | |BUG | bug fix | 54 | |DEP | deprecate something, or remove a deprecated object | 55 | |DEV | development tool or utility | 56 | |DOC | documentation | 57 | |ENH | enhancement | 58 | |MNT | maintenance commit (refactoring, typos, etc.) | 59 | |REV | revert an earlier commit | 60 | |STY | style fix (whitespace, PEP8) | 61 | |TST | addition or modification of tests | 62 | |REL | related to releasing numpy | 63 | |WIP | Commit that is a work in progress | 64 | 65 | ## The Pull Request 66 | 67 | * Now push to your fork 68 | * Submit a [pull request](https://help.github.com/articles/using-pull-requests) to this branch. This is a start to the conversation. 69 | 70 | At this point you're waiting on us. We like to at least comment on pull requests within three business days 71 | (and, typically, one business day). We may suggest some changes or improvements or alternatives. 72 | 73 | Hints to make the integration of your changes easy (and happen faster): 74 | - Keep your pull requests small 75 | - Don't forget your unit tests 76 | - All algorithms need documentation, don't forget the .rst file 77 | - Don't take changes requests to change your code personally 78 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Let us know if something is broken on Ocelot Optimizer. 
4 | 5 | --- 6 | 7 | **Describe the bug** 8 | 9 | 10 | **Expected behavior** 11 | 12 | 13 | **Steps to Reproduce** 14 | 15 | 16 | **Possible Solution** 17 | 21 | 22 | **My Platform** 23 | 29 | 30 | **Additional context** 31 | 32 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest a new feature for Ocelot Optimizer 4 | 5 | --- 6 | 7 | **What's the problem this feature will solve?** 8 | 9 | 10 | **Describe the solution you'd like** 11 | 12 | 13 | **Additional context** 14 | 15 | -------------------------------------------------------------------------------- /.github/SUPPORT.md: -------------------------------------------------------------------------------- 1 | ### Getting Started 2 | 3 | ### Bug reports & Feature request 4 | 5 | If you spot a problem with Ocelot Optimizer, please let us know by following the template in 6 | here: [Report a bug](https://github.com/ocelot-collab/optimizer/issues/new?template=bug-report.md). 7 | 8 | Ideas or suggestions for enhancements are more than welcome. Please use the following 9 | template in here: [Request feature](https://github.com/ocelot-collab/optimizer/issues/new?template=feature-request.md). 10 | 11 | ### Contact us 12 | 13 | If you have questions of comments in general about Ocelot Optmizer we want to know. 14 | 15 | You can choose between the channels open for communication the one that best fit you: 16 | 17 | - Chat channel using [Slack]() (Public) 18 | - [File a bug](https://github.com/ocelot-collab/optimizer/issues/new?template=bug-report.md) and let us know where our documentation could be improved. 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | log.txt 2 | .idea/ 3 | 4 | # PyDev 5 | .settings 6 | *.project 7 | *.pydevproject 8 | 9 | # Byte-compiled / optimized / DLL files 10 | __pycache__/ 11 | *.py[cod] 12 | *$py.class 13 | 14 | # C extensions 15 | *.so 16 | 17 | # Distribution / packaging 18 | .Python 19 | build/ 20 | develop-eggs/ 21 | dist/ 22 | downloads/ 23 | eggs/ 24 | .eggs/ 25 | lib/ 26 | lib64/ 27 | parts/ 28 | sdist/ 29 | var/ 30 | wheels/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | 35 | # PyInstaller 36 | # Usually these files are written by a python script from a template 37 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
38 | *.manifest 39 | *.spec 40 | 41 | # Installer logs 42 | pip-log.txt 43 | pip-delete-this-directory.txt 44 | 45 | # Unit test / coverage reports 46 | htmlcov/ 47 | .tox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | .hypothesis/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # pyenv 81 | .python-version 82 | 83 | # celery beat schedule file 84 | celerybeat-schedule 85 | 86 | # SageMath parsed files 87 | *.sage.py 88 | 89 | # Environments 90 | .env 91 | .venv 92 | env/ 93 | venv/ 94 | ENV/ 95 | 96 | # Spyder project settings 97 | .spyderproject 98 | .spyproject 99 | 100 | # Rope project settings 101 | .ropeproject 102 | 103 | # mkdocs documentation 104 | /site 105 | 106 | # mypy 107 | .mypy_cache/ 108 | 109 | # vim 110 | *.swp 111 | -------------------------------------------------------------------------------- /GP/BasicInterfaces.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Contains simple interfaces for the Bayes optimization class. 4 | 5 | Each interface must have the getState and setX methods as used below. 6 | """ 7 | 8 | import numpy as np 9 | 10 | # Basically a dummy interface that just holds x-values 11 | class TestInterface(object): 12 | def __init__(self, x_init, y_init=0): 13 | self.x = np.array(x_init,ndmin=2) 14 | self.y = y_init 15 | 16 | def getState(self): 17 | return self.x, self.y 18 | 19 | def setX(self, x_new): 20 | self.x = x_new 21 | 22 | # an interface that evaluates a function to give y-values 23 | class fint(object): 24 | def __init__(self, x_init, noise=0): 25 | self.x = np.array(x_init,ndmin=2) 26 | self.y = -1 27 | self.noise = noise 28 | 29 | def getState(self): 30 | return self.x, self.f(self.x) 31 | 32 | def f(self, x): 33 | res = 20 - np.reshape(np.sum((x - 1)**2,axis=1) * np.sum((x + 1)**2,axis=1),x.shape) + x 34 | res[np.abs(x) > 3.0] = 0 35 | return res 36 | 37 | def setX(self, x_new): 38 | self.x = x_new 39 | 40 | # uses a GP model's predictions to give y-values 41 | class GPint(object): 42 | def __init__(self, x_init, model): 43 | self.model = model 44 | self.x = x_init 45 | self.y = model.predict(np.array(x_init,ndmin=2)) 46 | 47 | def getState(self): 48 | return self.x, self.model.predict(np.array(self.x,ndmin=2))[0] 49 | 50 | def setX(self, x_new): 51 | self.x = x_new -------------------------------------------------------------------------------- /GP/DKL/README.md: -------------------------------------------------------------------------------- 1 | # Deep-Kernel-GP 2 | 3 | ## Dependencies 4 | The package has numpy and scipy.linalg as dependencies. 5 | The examples also use matplotlib and scikit-learn 6 | 7 | ## Introduction 8 | 9 | 10 | 11 | Instead of learning a mapping X-->Y with a neural network or GP regression, we learn the following mappings: 12 | X-->Z-->Y where the first step is performed by a neural net and the second by a gp regression algorithm. 13 | 14 | This way we are able to use GP Regression to learn functions on data where the the assumption that y(x) is a gaussian surface with covariance specified by one of the standard covariance fucntions, might not be a fair assumption. 
15 | For instance we can learn functions with image pixels as inputs or functions with length scales that varies with the input. 16 | 17 | 18 | The parameters of the neural net are trained maximizing the log marginal likelihood implied by z(x_train) and y_train. 19 | 20 | [Deep Kernel Learning - A.G. Wilson ++ ](https://arxiv.org/pdf/1511.02222.pdf) 21 | 22 | [Using Deep Belief Nets to Learn Covariance Kernels 23 | for Gaussian Processes - G. Hinton ++](http://www.cs.toronto.edu/~fritz/absps/dbngp.pdf) 24 | 25 | ## Examples 26 | Basic usage is done with a Scikit ish API: 27 | 28 | ```python 29 | 30 | layers=[] 31 | layers.append(Dense(32,activation='tanh')) 32 | layers.append(Dense(1)) 33 | layers.append(CovMat(kernel='rbf')) 34 | 35 | opt=Adam(1e-3) # or opt=SciPyMin('l-bfgs-b') 36 | 37 | gp=NNRegressor(layers,opt=opt,batch_size=x_train.shape[0],maxiter=1000,gp=True,verbose=True) 38 | gp.fit(x_train,y_train) 39 | y_pred,std=gp.predict(x_test) 40 | 41 | ``` 42 | 43 | The example creates a mapping z(x) where both x and z are 1d vectors using a neural network with 1 hidden layer. 44 | The CovMat layer creates a covariance matrix from z using the covariance function v\*exp(-0.5*|z1-z2|**2) with noise y where x and y are learned during training. 45 | 46 | x and y are available after training as gp.layers[-1].var and gp.layers[-1].s_alpha. 47 | The gp.fast_forward() function can be used to extract the z(x) function (It skips the last layer that makes an array of size [batch_size, batch_size]). 48 | 49 | ### Learning a function with varying length scale 50 | 51 | In the example.py script, deep kernel learning (DKL) is used to learn from samples of the function sin(64(x+0.5)**4). 52 | 53 | Learning this function with a Neural Network would be hard, since it can be challenging to fit rapidly oscilating functions using NNs. 54 | Learning the function using GPRegression with a squared exponential covariance function, would also be suboptimal, since we need to commit to one fixed length scale. 55 | Unless we have a lot of samples,we would be forced to give up precision on the slowly varying part of the function. 56 | 57 | DKL Prediction: 58 | 59 |

[image: DKL prediction]

[image: z(x) function learned by the neural network]
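A condensed sketch of the model that produces these plots, mirroring the layer stack used in `example.py` later in this repository (`x_train`, `y_train`, `x_test` are the sampled training data and test grid from that script):

```python
from dknet import NNRegressor
from dknet.layers import Dense, CovMat, Scale
from dknet.optimizers import Adam

layers = [Dense(6, activation='tanh'),             # small hidden layer learns the warping z(x)
          Dense(1),                                # 1-d latent coordinate z
          Scale(fixed=True, init_vals=64.0),       # fixed rescaling of z
          CovMat(kernel='rbf', alpha_fixed=False)] # RBF covariance on z, noise level learned

gp = NNRegressor(layers, opt=Adam(1e-3), batch_size=20, maxiter=1000, gp=True, verbose=True)
gp.fit(x_train, y_train)
y_pred, std = gp.predict(x_test)
```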

72 | 73 | We see that DKL solves the problem quite nicely, given the limited data. We also see that for x<-0.5 the std.dev of the DKL model does not capture the prediction error. 74 | -------------------------------------------------------------------------------- /GP/DKL/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from . import dknet 4 | 5 | -------------------------------------------------------------------------------- /GP/DKL/collect_data.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import numpy as np 4 | import scipy.io as sio 5 | 6 | 7 | base_path = '/u1/lcls/matlab/data/2018/2018-01/' 8 | quadlist = ['620', '640', '660', '680'] 9 | quadlist = sorted(['QUAD_LTU1_' + x + '_BCTRL' for x in quadlist]) 10 | gdet = 'GDET_FEE1_241_ENRCHSTBR' 11 | energy = 'BEND_DMP1_400_BDES' 12 | 13 | X = np.zeros((0,len(quadlist)+1)) 14 | 15 | for dir in os.listdir(base_path): 16 | path = base_path + dir + '/' 17 | for f in os.listdir(path): 18 | if f[:3]=='Oce': 19 | try: 20 | rawdat = sio.loadmat(path+f)['data'] 21 | except: 22 | continue 23 | if set(quadlist).issubset(set(rawdat.dtype.names)): 24 | y = rawdat[gdet][0][0] 25 | es = rawdat[energy][0][0] 26 | qs = [rawdat[q][0][0] for q in quadlist] 27 | shps = [x.shape[1] for x in qs] 28 | if y.shape[1] < 3 or min(shps) != max(shps) or shps[0] < 3: 29 | continue 30 | if y.shape[1] > shps[0]: 31 | y = y[:,:-1] 32 | new_stack = np.zeros((y.shape[1], X.shape[1])) 33 | for i,q in enumerate(quadlist): 34 | new_stack[:,i] = qs[i] / es 35 | 36 | new_stack[:,-1] = y 37 | X = np.concatenate((X,new_stack),axis=0) 38 | 39 | np.savetxt('ltus_enormed.csv', X) 40 | 41 | 42 | -------------------------------------------------------------------------------- /GP/DKL/dknet/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from . import layers 4 | from . import models 5 | from . import optimizers 6 | from . import utils 7 | from . import loss 8 | from .models import NNRegressor -------------------------------------------------------------------------------- /GP/DKL/dknet/layers/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from . import layer 3 | from . import activation 4 | from . import convolutional 5 | from . import dense 6 | from . import reshape 7 | from . import pooling 8 | from . 
import dropout 9 | 10 | 11 | from .pooling import MaxPool2D,AveragePool2D 12 | from .dense import Dense,RNNCell,CovMat,Parametrize,Scale 13 | from .convolutional import Conv2D 14 | from .activation import Activation 15 | from .reshape import Flatten 16 | from .dropout import Dropout -------------------------------------------------------------------------------- /GP/DKL/dknet/layers/activation.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import unravel_index 3 | from .layer import Layer 4 | def relu(x,dtype=numpy.float64): 5 | tmp=(x>=0) 6 | return x*tmp,1*tmp 7 | 8 | def sigmoid(x,dtype=numpy.float64): 9 | a=1.0/(numpy.exp(-x)+1.0) 10 | return a, a*(1-a) 11 | 12 | def linear(x,dtype=numpy.float64): 13 | return x,1.0#numpy.ones_like(x,dtype=dtype) 14 | 15 | def tanh(x,dtype=numpy.float64): 16 | a=numpy.tanh(x) 17 | return a, 1.0-a**2 18 | 19 | def lrelu(x,dtype=numpy.float64): 20 | y=(x>=0)*1.0+(x<0)*0.01 21 | return y*x,y 22 | 23 | def softplus(x,dtype=numpy.float64): 24 | tmp=numpy.exp(x) 25 | return numpy.log(tmp+1.0), tmp/(1.0+tmp) 26 | 27 | def softmax(x,dtype=numpy.float64): 28 | s=numpy.exp(x) 29 | s=s/numpy.sum(s,1)[:,numpy.newaxis] 30 | return s,s*(1.0-s) 31 | 32 | def rbf(x,dtype=numpy.float64): 33 | 34 | s=numpy.exp(-0.5*numpy.sum(x**2,-1)) 35 | print(x.shape,s.shape) 36 | return s, -x*s[:,:,numpy.newaxis] 37 | 38 | class Activation(Layer): 39 | 40 | dict={'linear':linear,'relu':relu,'sigmoid':sigmoid,'tanh':tanh,'softmax':softmax,'lrelu':lrelu,'softplus':softplus,'rbf':rbf} 41 | 42 | def __init__(self,strr): 43 | 44 | if strr in self.dict.keys(): 45 | self.afstr=strr 46 | self.af=self.dict[strr] 47 | else: 48 | print("Error. Undefined activation function '" + str(strr)+"'. 
Using linear activation.") 49 | print("Available activations: " + str(list(self.dict.keys()))) 50 | self.af=linear 51 | self.afstr='linear' 52 | self.trainable=False 53 | def forward(self,X): 54 | self.inp=X 55 | self.a=self.af(X,dtype=self.dtype) 56 | self.out=self.a[0] 57 | return self.out 58 | def backward(self,err): 59 | return self.a[1]*err 60 | 61 | -------------------------------------------------------------------------------- /GP/DKL/dknet/layers/convolutional.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import unravel_index 3 | from .activation import Activation 4 | from .layer import Layer 5 | 6 | class Conv2D(Layer): 7 | def __init__(self,n_out,kernel_size,activation=None): 8 | self.n_out=n_out 9 | self.activation=activation 10 | self.kernel_size=kernel_size 11 | self.trainable=True 12 | def initialize_ws(self): 13 | self.W=numpy.random.randn(self.kernel_size[0],self.kernel_size[1],self.n_inp,self.n_out).astype(dtype=self.dtype)*numpy.sqrt(1.0/(self.n_inp*numpy.prod(self.kernel_size))) 14 | self.b=numpy.zeros((1,self.n_out),dtype=self.dtype) 15 | self.dW=numpy.zeros_like(self.W,dtype=self.dtype) 16 | self.db=numpy.zeros_like(self.b,dtype=self.dtype) 17 | assert(self.W.shape[0]%2!=0) #Odd filter size pls 18 | assert(self.W.shape[1]%2!=0) #Odd fiter size pls 19 | def forward(self,X): 20 | self.inp=X 21 | 22 | hpad,wpad=int(self.W.shape[0]/2),int(self.W.shape[1]/2) 23 | X2=numpy.zeros((X.shape[0],X.shape[1]+2*hpad,X.shape[2]+2*wpad,X.shape[3]),dtype=self.dtype) 24 | X2[:,hpad:X2.shape[1]-hpad,wpad:X2.shape[2]-wpad,:]=numpy.copy(X) 25 | A=numpy.zeros((X.shape[0],X.shape[1],X.shape[2],self.n_out),dtype=self.dtype) 26 | M,N=X.shape[1],X.shape[2] 27 | for i in range(0,M): 28 | for j in range(0,N): 29 | A[:,i,j,:]=numpy.sum(X2[:,hpad+i-hpad:hpad+i+hpad+1,wpad+j-wpad:wpad+j+wpad+1,:][:,:,:,:,numpy.newaxis]*self.W[numpy.newaxis,:,:,:,:],axis=(1,2,3)) 30 | A+=self.b[0,:] 31 | 32 | self.out=A 33 | return self.out 34 | 35 | def backward(self,err): 36 | 37 | X=self.inp 38 | hpad,wpad=int(self.W.shape[0]/2),int(self.W.shape[1]/2) 39 | X2=numpy.zeros((X.shape[0],X.shape[1]+2*hpad,X.shape[2]+2*wpad,X.shape[3]),dtype=self.dtype) 40 | X2[:,hpad:X2.shape[1]-hpad,wpad:X2.shape[2]-wpad,:]=numpy.copy(X) 41 | 42 | tmpdW=numpy.zeros_like(self.dW,dtype=self.dtype) 43 | dodi=numpy.zeros_like(X2,dtype=self.dtype) 44 | M,N=X.shape[1],X.shape[2] 45 | for i in range(0,M): 46 | for j in range(0,N): 47 | tmpdW+=numpy.sum(err[:,i,j,:][:,numpy.newaxis,numpy.newaxis,numpy.newaxis,:]*X2[:,i:i+2*hpad+1,j:j+2*wpad+1,:][:,:,:,:,numpy.newaxis],0) 48 | dodi[:,i:i+2*hpad+1,j:j+2*wpad+1,:]+=numpy.sum(err[:,i,j,:][:,numpy.newaxis,numpy.newaxis,numpy.newaxis,:]*self.W[numpy.newaxis,:,:,:,:],-1) 49 | self.dW=tmpdW 50 | self.db[0,:]=numpy.sum(err,(0,1,2)) 51 | 52 | return dodi[:,hpad:dodi.shape[1]-hpad,wpad:dodi.shape[2]-wpad,:] -------------------------------------------------------------------------------- /GP/DKL/dknet/layers/dropout.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from .layer import Layer 3 | class Dropout(Layer): 4 | 5 | def __init__(self,keep_prob): 6 | self.keep_prob=keep_prob 7 | self.trainable=False 8 | def forward(self,X): 9 | self.inp=X 10 | self.mask=(numpy.random.random(size=X.shape).astype(self.dtype)<=self.keep_prob)/self.keep_prob 11 | self.out=self.mask*self.inp 12 | return self.out 13 | 14 | def predict(self,X): 15 | self.inp=X 16 | self.out=X 17 | 
self.mask=numpy.ones_like(X) 18 | return X 19 | 20 | def backward(self,err): 21 | return err*self.mask 22 | -------------------------------------------------------------------------------- /GP/DKL/dknet/layers/layer.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | 3 | class Layer: 4 | dtype=numpy.float64 5 | def set_inp(self,n_inp): 6 | self.n_inp=n_inp -------------------------------------------------------------------------------- /GP/DKL/dknet/layers/pooling.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import unravel_index 3 | from .layer import Layer 4 | class MaxPool2D(Layer): 5 | def __init__(self,pool_size=(2,2)): 6 | self.trainable=False 7 | self.pool_size=pool_size 8 | def forward(self,X): 9 | self.inp=X 10 | self.mask=numpy.zeros_like(X) 11 | assert(X.shape[1]%self.pool_size[0]==0) 12 | assert(X.shape[2]%self.pool_size[1]==0) 13 | self.out=numpy.zeros((X.shape[0],int(X.shape[1]/self.pool_size[0]),int(X.shape[2]/self.pool_size[1]),X.shape[3])) 14 | 15 | for i in range(0,self.out.shape[1]): 16 | for j in range(0,self.out.shape[2]): 17 | a=X[:,self.pool_size[0]*i:self.pool_size[0]*(i+1),self.pool_size[1]*j:self.pool_size[1]*(j+1),:] 18 | mv=numpy.max(a,axis=(1,2)) 19 | self.out[:,i,j,:] = mv 20 | self.mask[:,self.pool_size[0]*i:self.pool_size[0]*(i+1),self.pool_size[1]*j:self.pool_size[1]*(j+1),:]=mv[:,numpy.newaxis,numpy.newaxis,:] 21 | return self.out 22 | 23 | def backward(self,err): 24 | err2=numpy.zeros_like(self.inp) 25 | for i in range(0,self.out.shape[1]): 26 | for j in range(0,self.out.shape[2]): 27 | mm=(self.mask[:,self.pool_size[0]*i:self.pool_size[0]*(i+1),self.pool_size[1]*j:self.pool_size[1]*(j+1),:]==self.inp[:,self.pool_size[0]*i:self.pool_size[0]*(i+1),self.pool_size[1]*j:self.pool_size[1]*(j+1),:]) 28 | ms=numpy.sum(mm,axis=(1,2)) 29 | err2[:,self.pool_size[0]*i:self.pool_size[0]*(i+1),self.pool_size[1]*j:self.pool_size[1]*(j+1),:]=(mm/ms[:,numpy.newaxis,numpy.newaxis,:])*err[:,i,j,:][:,numpy.newaxis,numpy.newaxis,:] 30 | return err2 31 | class AveragePool2D(Layer): 32 | def __init__(self,pool_size=(2,2)): 33 | self.trainable=False 34 | self.pool_size=pool_size 35 | def forward(self,X): 36 | self.inp=X 37 | assert(X.shape[1]%self.pool_size[0]==0) 38 | assert(X.shape[2]%self.pool_size[1]==0) 39 | self.out=numpy.zeros((X.shape[0],int(X.shape[1]/self.pool_size[0]),int(X.shape[2]/self.pool_size[1]),X.shape[3])) 40 | 41 | for i in range(0,self.out.shape[1]): 42 | for j in range(0,self.out.shape[2]): 43 | a=X[:,self.pool_size[0]*i:self.pool_size[0]*(i+1),self.pool_size[1]*j:self.pool_size[1]*(j+1),:] 44 | mv=numpy.average(a,axis=(1,2)) 45 | self.out[:,i,j,:] = mv 46 | return self.out 47 | 48 | def backward(self,err): 49 | err2=numpy.zeros_like(self.inp) 50 | for i in range(0,self.out.shape[1]): 51 | for j in range(0,self.out.shape[2]): 52 | err2[:,self.pool_size[0]*i:self.pool_size[0]*(i+1),self.pool_size[1]*j:self.pool_size[1]*(j+1),:]=err[:,i,j,:][:,numpy.newaxis,numpy.newaxis,:]/numpy.prod(self.pool_size) 53 | return err2 -------------------------------------------------------------------------------- /GP/DKL/dknet/layers/reshape.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | from numpy import unravel_index 3 | from .layer import Layer 4 | class Flatten(Layer): 5 | def __init__(self): 6 | self.trainable=False 7 | def forward(self,X): 8 | self.inp=numpy.copy(X) 9 | 
self.out=X.reshape(X.shape[0],numpy.prod(X.shape[1::])) 10 | return self.out 11 | def backward(self,err): 12 | return err.reshape(self.inp.shape) -------------------------------------------------------------------------------- /GP/DKL/dknet/loss.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | def mse_loss(y_true,y_pred): 3 | return 0.5*numpy.average((y_true-y_pred)**2),(y_pred-y_true)/numpy.prod(y_true.shape) 4 | 5 | def cce_loss(y_true,y_pred): 6 | return -numpy.average(numpy.sum(y_true*numpy.log(y_pred),1)), (y_pred-y_true)/(y_pred*(1.0-y_pred)+1e-12)/y_true.shape[0] -------------------------------------------------------------------------------- /GP/DKL/dknet/utils.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | 3 | def one_hot(x,n_classes): 4 | assert(len(x.shape)==1) 5 | A=numpy.zeros((x.shape[0],n_classes)) 6 | A[numpy.arange(len(x)),x]=1.0 7 | return A 8 | def calc_acc(y_true,y_pred): 9 | if y_true.shape[1] > 1: 10 | return numpy.average(numpy.argmax(y_true,1)==numpy.argmax(y_pred,1)) 11 | else: 12 | return numpy.average(1.0*(y_pred>=0.5) == y_true) 13 | def r2(y_true,y_pred): 14 | avg = numpy.mean(y_true,0) 15 | var = numpy.sum((y_true-avg)**2,0) 16 | err = numpy.sum((y_true-y_pred)**2,0) 17 | r2=1.0-err/var 18 | #print(r2) 19 | return r2 20 | def normalize(X,sub,div): 21 | return (numpy.copy(X)-sub)/div 22 | 23 | def unpickle(file): 24 | import pickle 25 | with open(file, 'rb') as fo: 26 | dict = pickle.load(fo, encoding='bytes') 27 | return dict 28 | 29 | def load_cifar(shuffle=False): 30 | x_train=numpy.zeros((0,32,32,3)) 31 | y_train=numpy.zeros((0,),dtype=numpy.int) 32 | x_test=numpy.zeros((0,32,32,3)) 33 | y_test=numpy.zeros((0,),dtype=numpy.int) 34 | for i in range(0,5): 35 | dat=unpickle("data/cifar10/data_batch_"+str(i+1)) 36 | print("KEYS: ") 37 | print(dat.keys()) 38 | xdat=numpy.zeros((len(dat[b'data']),32,32,3)) 39 | xdat[:,:,:,0]=dat[b'data'][:,0:1024].reshape(-1,32,32) 40 | xdat[:,:,:,1]=dat[b'data'][:,1024:2048].reshape(-1,32,32) 41 | xdat[:,:,:,2]=dat[b'data'][:,2048:3072].reshape(-1,32,32) 42 | x_train=numpy.concatenate((x_train,xdat),0) 43 | y_train=numpy.concatenate((y_train,dat[b"labels"])) 44 | 45 | dat=unpickle("data/cifar10/test_batch") 46 | xdat=numpy.zeros((len(dat[b'data']),32,32,3)) 47 | xdat[:,:,:,0]=dat[b'data'][:,0:1024].reshape(-1,32,32) 48 | xdat[:,:,:,1]=dat[b'data'][:,1024:2048].reshape(-1,32,32) 49 | xdat[:,:,:,2]=dat[b'data'][:,2048:3072].reshape(-1,32,32) 50 | x_test=numpy.concatenate((x_test,xdat),0) 51 | y_test=numpy.concatenate((y_test,dat[b"labels"])) 52 | 53 | x_train=x_train.astype('float32') 54 | x_test=x_test.astype('float32') 55 | x_train /= 255.0 56 | x_test /= 255.0 57 | 58 | y_train=y_train.astype('int') 59 | y_test=y_test.astype('int') 60 | print(y_train) 61 | y_train = one_hot(y_train, 10) 62 | y_test = one_hot(y_test, 10) 63 | 64 | if shuffle: 65 | #Shuffle data. 66 | tmp=numpy.arange(len(x_train)) 67 | numpy.random.shuffle(tmp) 68 | x_train,y_train=x_train[tmp],y_train[tmp] 69 | 70 | tmp=numpy.arange(len(x_test)) 71 | numpy.random.shuffle(tmp) 72 | x_test,y_test=x_test[tmp],y_test[tmp] 73 | 74 | return [[x_train,y_train],[x_test,y_test]] 75 | def load_mnist(shuffle=False): 76 | 77 | #If error loading files, use this to aquire mnist, if you have keras. 
78 | # 79 | #from keras.datasets import mnist 80 | #(x_train, y_train), (x_test, y_test) = mnist.load_data() 81 | #numpy.savez_compressed("data/mnist/mnist_train",a=x_train,b=y_train) 82 | #numpy.savez_compressed("data/mnist/mnist_test",a=x_test,b=y_test) 83 | 84 | tftr,tfte=numpy.load("data/mnist/mnist_train.npz"),numpy.load("data/mnist/mnist_test.npz") 85 | x_train,y_train=tftr['a'],tftr['b'] 86 | x_test,y_test=tfte['a'],tfte['b'] 87 | 88 | x_train=x_train.astype('float32').reshape(-1,28,28,1) 89 | x_test=x_test.astype('float32').reshape(-1,28,28,1) 90 | x_train /= 255.0 91 | x_test /= 255.0 92 | y_train = one_hot(y_train, 10) 93 | y_test = one_hot(y_test, 10) 94 | 95 | if shuffle: 96 | #Shuffle data. 97 | tmp=numpy.arange(len(x_train)) 98 | numpy.random.shuffle(tmp) 99 | x_train,y_train=x_train[tmp],y_train[tmp] 100 | 101 | tmp=numpy.arange(len(x_test)) 102 | numpy.random.shuffle(tmp) 103 | x_test,y_test=x_test[tmp],y_test[tmp] 104 | 105 | return [[x_train,y_train],[x_test,y_test]] 106 | 107 | 108 | def grad_check(model,X,Y,check_n_params=50): 109 | eps=1e-7 110 | 111 | ll=[] 112 | for n in range(0,check_n_params): 113 | model.forward(X,gc=True) 114 | model.backward(Y) 115 | i=numpy.random.randint(len(model.layers)) 116 | while not model.layers[i].trainable: 117 | i=numpy.random.randint(len(model.layers)) 118 | nums=[] 119 | for j in range(0,len(model.layers[i].W.shape)): 120 | nums.append(numpy.random.randint(model.layers[i].W.shape[j])) 121 | nums=tuple(nums) 122 | 123 | bnum=[] 124 | for j in range(0,len(model.layers[i].b.shape)): 125 | bnum.append(numpy.random.randint(model.layers[i].b.shape[j])) 126 | bnum=tuple(bnum) 127 | 128 | dW=model.layers[i].dW.item(nums) 129 | db=model.layers[i].db.item(bnum) 130 | W=numpy.copy(model.layers[i].W) 131 | b=numpy.copy(model.layers[i].b) 132 | 133 | model.layers[i].W.itemset(nums,W.item(nums)+eps) 134 | model.forward(X,gc=True) 135 | model.backward(Y) 136 | jp=model.j 137 | 138 | model.layers[i].W.itemset(nums,W.item(nums)-eps) 139 | model.forward(X,gc=True) 140 | model.backward(Y) 141 | jm=model.j 142 | model.layers[i].W.itemset(nums,W.item(nums)) 143 | 144 | dW2=0.5*(jp-jm)/eps 145 | 146 | model.layers[i].b.itemset(bnum,b.item(bnum)+eps) 147 | model.forward(X,gc=True) 148 | model.backward(Y) 149 | jp=model.j 150 | model.layers[i].b.itemset(bnum,b.item(bnum)-eps) 151 | model.forward(X,gc=True) 152 | model.backward(Y) 153 | jm=model.j 154 | 155 | db2=0.5*(jp-jm)/eps 156 | model.layers[i].b.itemset(bnum,b.item(bnum)) 157 | tmp=[numpy.abs(db2-db),numpy.abs(dW2-dW)] 158 | ll.append(tmp) 159 | #print(ll) 160 | ll=numpy.array(ll) 161 | return numpy.max(ll,0) -------------------------------------------------------------------------------- /GP/DKL/example.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | from dknet import NNRegressor 5 | from dknet.layers import Dense,CovMat,Dropout,Parametrize,Scale 6 | from dknet.optimizers import Adam,SciPyMin,SDProp 7 | #from sklearn.gaussian_process import GaussianProcessClassifier,GaussianProcessRegressor 8 | #from sklearn.gaussian_process.kernels import RBF,WhiteKernel,ConstantKernel 9 | def f(x): 10 | return (x+0.5>=0)*np.sin(64*(x+0.5)**4)#-1.0*(x>0)+numpy. 
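# Note: f is identically zero for x < -0.5 and oscillates faster and faster as x grows
# past -0.5, so no single RBF length scale fits it well; this is the varying-length-scale
# target described in the DKL README.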
11 | 12 | np.random.seed(0) 13 | x_train=np.random.random(size=(100,1))-0.5 14 | y_train=f(x_train)+np.random.normal(0.0,0.01,size=x_train.shape) 15 | 16 | 17 | 18 | layers=[] 19 | #layers.append(Dense(64,activation='tanh')) 20 | #layers.append(Dropout(0.99)) 21 | layers.append(Dense(6,activation='tanh')) 22 | #layers.append(Dropout(0.99)) 23 | layers.append(Dense(1)) 24 | layers.append(Scale(fixed=True,init_vals=64.0)) 25 | layers.append(CovMat(kernel='rbf',alpha_fixed=False)) 26 | 27 | opt=Adam(1e-3) 28 | #opt=SciPyMin('l-bfgs-b') 29 | 30 | gp=NNRegressor(layers,opt=opt,batch_size=20,maxiter=1000,gp=True,verbose=True) 31 | gp.fit(x_train,y_train) 32 | #print(gp.grad_check(x_train[0:10],y_train[0:10])) 33 | x_test=np.linspace(-0.7,0.7,1000).reshape(-1,1) 34 | 35 | 36 | 37 | y_pred,std=gp.predict(x_test) 38 | 39 | 40 | plt.plot(x_test,gp.layers[-2].out) 41 | plt.xlabel('X') 42 | plt.ylabel('Z') 43 | plt.figure() 44 | 45 | plt.plot(x_train,y_train,'.') 46 | plt.plot(x_test,f(x_test)[:,0]) 47 | plt.plot(x_test,y_pred) 48 | plt.xlabel('X') 49 | plt.ylabel('Y') 50 | plt.fill_between(x_test[:,0],y_pred[:,0]-std,y_pred[:,0]+std,alpha=0.5) 51 | 52 | plt.legend(['Training samples', 'True function', 'Predicted function','Prediction stddev']) 53 | plt.show() 54 | -------------------------------------------------------------------------------- /GP/DKL/quadtest.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | from dknet import NNRegressor 5 | from dknet.layers import Dense,CovMat,Dropout,Parametrize 6 | from dknet.optimizers import Adam,SciPyMin,SDProp 7 | 8 | np.random.seed(1) 9 | data = np.loadtxt('ltus_en.txt') 10 | train_data = data[:1000] 11 | test_full_data = data[1000:] 12 | np.random.shuffle(test_full_data) 13 | test_data = test_full_data[:1000] 14 | 15 | which_qs = [2,3] 16 | x_train = train_data[:,which_qs] 17 | y_train = train_data[:,[-1]] 18 | x_test = test_data[:,which_qs] 19 | y_test = test_data[:,[-1]] 20 | 21 | layers=[] 22 | n_out = 2 23 | layers.append(Dense(100,activation='lrelu')) 24 | #layers.append(Dropout(0.8)) 25 | layers.append(Dense(100,activation='lrelu')) 26 | #layers.append(Dropout(0.8)) 27 | #layers.append(Dense(50,activation='lrelu')) 28 | layers.append(Dense(n_out)) 29 | layers.append(CovMat(kernel='rbf',alpha_fixed=False)) 30 | 31 | opt=Adam(1e-4) 32 | 33 | gp=NNRegressor(layers,opt=opt,batch_size=50,maxiter=4000,gp=True,verbose=False) 34 | gp.fit(x_train,y_train) 35 | 36 | if len(which_qs) > 2 or n_out > 2 or True: 37 | ytr_pred,std=gp.predict(x_train) 38 | ytestpred,std = gp.predict(x_test) 39 | ydumb = np.mean(y_train) 40 | 41 | mse_train = np.mean((ytr_pred - y_train)**2) 42 | mse_test = np.mean((y_test - ytestpred)**2) 43 | mse_dumb = np.mean((y_test - ydumb)**2) 44 | print 'train',np.sqrt(mse_train) 45 | print 'test',np.sqrt(mse_test) 46 | print 'dumb',np.sqrt(mse_dumb) 47 | 48 | if len(which_qs)==2 and n_out==2: 49 | get_p = lambda i,p: np.percentile(data[:,which_qs[i]], p) 50 | r1 = (get_p(0,5), get_p(0,95)) 51 | r2 = (get_p(1,5), get_p(1,95)) 52 | 53 | full1 = np.linspace(r1[0], r1[1], 1000)[:,np.newaxis] 54 | full2 = np.linspace(r2[0], r2[1], 1000)[:,np.newaxis] 55 | 56 | num_lines = 6 57 | sp1 = np.linspace(r1[0], r1[1], num_lines) 58 | sp2 = np.linspace(r2[0], r2[1], num_lines) 59 | 60 | points = np.zeros((0,2)) 61 | zs = np.zeros((0,2)) 62 | for i in range(num_lines): 63 | verts1 = np.ones_like(full2) * sp1[i] 64 | points = np.concatenate((points, 
np.concatenate((verts1,full2),axis=1)),axis=0) 65 | horiz2 = np.ones_like(full1) * sp2[i] 66 | points = np.concatenate((points, np.concatenate((full1,horiz2),axis=1)),axis=0) 67 | 68 | zs = np.concatenate((zs, gp.fast_forward(points[-2*verts1.shape[0]:])),axis=0) 69 | 70 | alldatx = data[:,which_qs] 71 | alldaty = data[:,-1] 72 | alldatz = gp.fast_forward(alldatx) 73 | 74 | plt.scatter(points[:,0],points[:,1],lw=0,c=points[:,0] + points[:,1]) 75 | plt.scatter(alldatx[:,0], alldatx[:,1], lw=0, c=alldaty) 76 | plt.xlim([-.3+r1[0], .3+r1[1]]) 77 | plt.ylim([-.3+r2[0], .3+r2[1]]) 78 | plt.show() 79 | plt.close() 80 | plt.scatter(zs[:,0], zs[:,1], lw=0, c=points[:,0] + points[:,1]) 81 | plt.scatter(alldatz[:,0], alldatz[:,1], lw=0, c=alldaty) 82 | plt.xlim([-.3+np.min(zs[:,0]),.3+np.max(zs[:,0])]) 83 | plt.ylim([-.3+np.min(zs[:,1]),.3+np.max(zs[:,1])]) 84 | plt.show() 85 | 86 | 87 | -------------------------------------------------------------------------------- /GP/GP.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Wed Nov 18 19:46:52 2015 4 | 5 | @author: Mitch 6 | """ 7 | 8 | import numpy as np 9 | import numpy.linalg as alg 10 | from minimize import minimize 11 | from GP_utils import * 12 | 13 | def SPGP_train(X,Y,num_pseudo_inputs,num_starts=1): 14 | """ 15 | Trains a sparse Gaussian process on the input data. 16 | X -- DataFrame with training data (n x dim) 17 | Y -- Labels for training data (n x 1) 18 | num_pseudo_inputs -- number of points used to fill sparse model 19 | num_starts -- number of attempts at minimization. Increases runtime linearly. 20 | 21 | Returns: 22 | xb -- pseudo-inputs as ndarray (m x dim) 23 | hyperparams -- tuple containing GP parameters 24 | 25 | Translated to python from Edward Snelson's matlab code by Mitchell McIntire. 26 | """ 27 | 28 | (n,dim) = X.shape 29 | m = np.min([num_pseudo_inputs,n]) 30 | 31 | # center data 32 | mu_y = np.mean(Y) 33 | y0 = Y - mu_y 34 | 35 | min_lik = np.inf 36 | for i in range(num_starts): 37 | # randomly choose initial points 38 | # should randomly sample, but hacking this in for the ACR since 39 | # the pandas version is older 40 | #xb_init = np.array(X.sample(m)) 41 | xb_init = np.array(X.iloc[:m,:]) 42 | 43 | # initialize hyperparameters 44 | hyp_ARD = np.array([-2*np.log((X.max() - X.min() + 0.1) / 2)]) 45 | hyp_coeff = np.array([[np.log(Y.var() + 0.1)]]) 46 | hyp_noise = np.array([[np.log(Y.var() / 4 + 0.01)]]) 47 | hyperparams = pack_hyps(xb_init, hyp_ARD, hyp_coeff, hyp_noise) 48 | 49 | # minimize neg. 
log likelihood 50 | # min_result = minimize(SPGP_likelihood, hyperparams, args=(y0,np.array(X),m), method='BFGS', jac=True) 51 | #iter_res = np.reshape(min_result.x, (1,(m+1)*dim + 2)) 52 | #lik = SPGP_likelihood(iter_res,y0,np.array(X),m,compute_deriv=False) 53 | #st = time.time() 54 | (iter_res, lik, i) = minimize(hyperparams, SPGP_likelihood, args=(y0,np.array(X),m), maxnumfuneval=200) 55 | #print(time.time() - st) 56 | if(lik[0] < min_lik): 57 | min_lik = lik[0] 58 | opt_res = iter_res 59 | 60 | # extract minimizing hyperparameters 61 | (xb, hyp_ARD, hyp_coeff, hyp_noise) = unpack_hyps(opt_res, m, dim) 62 | 63 | hyperparams = (hyp_ARD, hyp_coeff, hyp_noise) 64 | 65 | return xb, hyperparams #, mu_y 66 | 67 | 68 | def SPGP_predict(X, y, xb, xt, hyperparams): 69 | (N, dim) = X.shape 70 | X = np.array(X) 71 | xt = np.array(xt) 72 | xb = np.array(xb) 73 | y = np.reshape(np.array(y), (N,1)) 74 | m = xb.shape[0] 75 | 76 | (hyp_ARD, hyp_coeff, hyp_noise) = hyperparams 77 | sigma = np.exp(hyp_noise) 78 | coeff = np.exp(hyp_coeff) 79 | 80 | K = RBF_kernel(xb, xb, hyp_ARD, hyp_coeff, is_self=True) 81 | L = alg.cholesky(K) 82 | K = RBF_kernel(xb, X, hyp_ARD, hyp_coeff) 83 | V = alg.solve(L, K) 84 | 85 | ep = 1 + np.reshape(coeff - np.sum(V * V, axis=0), (1,N)) / sigma 86 | ep_sqrt = np.sqrt(ep) 87 | V = V / ep_sqrt 88 | y = y / ep_sqrt.transpose() 89 | 90 | Lm = alg.cholesky(sigma * np.eye(m) + np.dot(V, V.transpose())) 91 | bet = alg.solve(Lm, np.dot(V, y)) 92 | 93 | K = RBF_kernel(xb, xt, hyp_ARD, hyp_coeff) 94 | lst = alg.solve(L, K) 95 | lmst = alg.solve(Lm, lst) 96 | 97 | mu = np.dot(bet.transpose(), lmst).transpose() 98 | lst_cols = np.sum(lst * lst, axis=0).transpose() 99 | lmst_cols = np.sum(lmst * lmst, axis=0).transpose() 100 | s2 = coeff - lst_cols + sigma * lmst_cols 101 | 102 | return mu, s2 103 | 104 | 105 | -------------------------------------------------------------------------------- /GP/GPexample.py: -------------------------------------------------------------------------------- 1 | # Author: Vincent Dubourg 2 | # Jake Vanderplas 3 | # Jan Hendrik Metzen s 4 | # License: BSD 3 clause 5 | 6 | import numpy as np 7 | from matplotlib import pyplot as plt 8 | 9 | from sklearn.gaussian_process import GaussianProcessRegressor 10 | from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C 11 | 12 | np.random.seed(1) 13 | 14 | 15 | def f(x): 16 | """The function to predict.""" 17 | return x * np.sin(x) 18 | 19 | # ---------------------------------------------------------------------- 20 | # First the noiseless case 21 | X = np.atleast_2d([-1., -3., -5., -6., -7., -8., 1, 3, 5, 6, 7, 8]).T 22 | print('X is', X) 23 | 24 | # Observations 25 | y = f(X).ravel() 26 | print('y is', y) 27 | 28 | # Mesh the input space for evaluations of the real function, the prediction and 29 | # its MSE 30 | x = np.atleast_2d(np.linspace(-10, 10, 1000)).T 31 | 32 | # Instanciate a Gaussian Process model 33 | kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2)) 34 | gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9) 35 | 36 | # Fit to data using Maximum Likelihood Estimation of the parameters 37 | gp.fit(X, y) 38 | 39 | # Make the prediction on the meshed x-axis (ask for MSE as well) 40 | y_pred, sigma = gp.predict(x, return_std=True) 41 | 42 | # Plot the function, the prediction and the 95% confidence interval based on 43 | # the MSE 44 | fig = plt.figure() 45 | plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$') 46 | plt.plot(X, y, 'r.', markersize=10, label=u'Observations') 47 
| plt.plot(x, y_pred, 'b-', label=u'Prediction') 48 | plt.fill(np.concatenate([x, x[::-1]]), 49 | np.concatenate([y_pred - 1.9600 * sigma, 50 | (y_pred + 1.9600 * sigma)[::-1]]), 51 | alpha=.5, fc='b', ec='None', label='95% confidence interval') 52 | plt.xlabel('$x$') 53 | plt.ylabel('$f(x)$') 54 | plt.ylim(-10, 20) 55 | plt.legend(loc='upper left') 56 | 57 | # ---------------------------------------------------------------------- 58 | # now the noisy case 59 | #X = np.linspace(0.1, 9.9, 20) 60 | X = np.atleast_2d([-1., -3., -5., -6., -7., -8., 1, 3, 5, 6, 7, 8]).T 61 | #X = np.atleast_2d(X).T 62 | 63 | # Observations and noise 64 | y = f(X).ravel() 65 | dy = 0.5 + 1.0 * np.random.random(y.shape) 66 | noise = np.random.normal(0, dy) 67 | y += noise 68 | 69 | # Instanciate a Gaussian Process model 70 | gp = GaussianProcessRegressor(kernel=kernel, alpha=(dy / y) ** 2, 71 | n_restarts_optimizer=10) 72 | 73 | # Fit to data using Maximum Likelihood Estimation of the parameters 74 | gp.fit(X, y) 75 | 76 | # Make the prediction on the meshed x-axis (ask for MSE as well) 77 | y_pred, sigma = gp.predict(x, return_std=True) 78 | 79 | # Plot the function, the prediction and the 95% confidence interval based on 80 | # the MSE 81 | fig = plt.figure() 82 | plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$') 83 | plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations') 84 | plt.plot(x, y_pred, 'b-', label=u'Prediction') 85 | plt.fill(np.concatenate([x, x[::-1]]), 86 | np.concatenate([y_pred - 1.9600 * sigma, 87 | (y_pred + 1.9600 * sigma)[::-1]]), 88 | alpha=.5, fc='b', ec='None', label='95% confidence interval') 89 | plt.xlabel('$x$') 90 | plt.ylabel('$f(x)$') 91 | plt.ylim(-10, 20) 92 | plt.legend(loc='upper left') 93 | 94 | plt.show() 95 | -------------------------------------------------------------------------------- /GP/GPtools.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon Jan 25 21:04:59 2016 4 | 5 | @author: Mitch 6 | """ 7 | 8 | import numpy as np 9 | import pylab 10 | from scipy.stats import norm 11 | import matplotlib.pyplot as plt 12 | 13 | def plotGPmodel(GP, xmin=-15, xmax=15, cov=False): 14 | mu = [] 15 | xvals = [] 16 | for i in range(1000): 17 | x = xmin + i*(xmax - xmin) / 1000.0 18 | xvals.append(x) 19 | mu.append(GP.predict(np.array(x,ndmin=2))[int(cov)]) 20 | 21 | plt.scatter(xvals,mu) 22 | plt.show() 23 | 24 | def vify(x, i): 25 | return np.array(x.iloc[i,:],ndmin=2) 26 | 27 | def scat(y1,y2,fname=None): 28 | plt.scatter(range(len(y1)),y1,s=10,lw=0) 29 | plt.scatter(range(len(y1)),y2,s=10,lw=0,c=u'r') 30 | if(fname is not None): 31 | plt.savefig(fname,dpi=128) 32 | else: 33 | plt.show() 34 | 35 | def BVplot(GP, f, xmin=-2.5, xmax=2.5, fname=None): 36 | xx = [xmin + (i/100.0)*(xmax-xmin) for i in range(100)] 37 | xxx = [xmin + (i/1000.0)*(xmax-xmin) for i in range(1000)] 38 | vec = np.reshape(np.array(xx),(100,1)) 39 | vecx = np.reshape(np.array(xxx),(1000,1)) 40 | pred,var = GP.predict(vec) 41 | var = np.reshape(np.diag(var),pred.shape) 42 | BVs = GP.BV 43 | fy = f(vecx) 44 | plt.errorbar(xx,pred,yerr=2*np.sqrt(var))#,c=u'b') 45 | plt.scatter(xxx,fy,s=10,lw=1,c=u'k') 46 | plt.scatter(BVs, [0 for x in BVs], s=30, c=u'r') 47 | plt.xlabel('x') 48 | plt.ylabel('y') 49 | if(fname is None): 50 | plt.show() 51 | else: 52 | plt.savefig(fname, dpi=128) 53 | 54 | def errplot(y1,y2,fname=None,xlabel='Iteration number', ylabel='',mean=True): 55 | numiter = len(y1[0]) 56 | if(not 
mean): 57 | plt.errorbar(range(numiter),np.median(y1,axis=0),yerr=np.std(y1,axis=0)/np.sqrt(len(y1))) 58 | plt.errorbar(range(numiter),np.median(y2,axis=0),yerr=np.std(y2,axis=0)/np.sqrt(len(y2)),c=u'r') 59 | else: 60 | plt.errorbar(range(numiter),np.mean(y1,axis=0),yerr=np.std(y1,axis=0)/np.sqrt(len(y1))) 61 | plt.errorbar(range(numiter),np.mean(y2,axis=0),yerr=np.std(y2,axis=0)/np.sqrt(len(y2)),c=u'r') 62 | plt.xlabel(xlabel) 63 | plt.ylabel(ylabel) 64 | if(fname is not None): 65 | plt.savefig(fname,dpi=128) 66 | else: 67 | plt.show() 68 | 69 | def GPheatmap(model, base, dim1, dim2, range1, range2, size=50, type='pred', ax=None, y_best=None, fname=None): 70 | # model: a GP that can predict 71 | # base: a vector to create a 2-D slice from 72 | # dim1: the index of the x-dimension to be sliced 73 | # dim2: the index of the y-dimension to be sliced 74 | # range1,range2: tuple (min,max) of plotting range for x,y 75 | # size: resolution of the heatmap 76 | # Var: untested, plots GP variance instead of predictions 77 | 78 | min1,max1 = range1 79 | min2,max2 = range2 80 | inc1 = (1.0 / size) * (max1 - min1) 81 | inc2 = (1.0 / size) * (max2 - min2) 82 | data = np.zeros(shape=(size*size,len(base))) 83 | for i in range(size): 84 | for j in range(size): 85 | base[dim1] = min1 + j * inc1 86 | base[dim2] = min2 + i * inc2 87 | data[i*size + j] = base 88 | 89 | pred,var = model.predict(data) 90 | if(type=='EI'): 91 | diff = pred - y_best 92 | var_vec = np.sqrt(np.reshape(np.diag(var),(size*size,1))) 93 | Z = diff / var_vec 94 | EI = diff * norm.cdf(Z) + var_vec * norm.pdf(Z) 95 | EI = np.nan_to_num(EI) 96 | mat = np.reshape(EI,(size,size)) 97 | else: 98 | Mp = np.reshape(pred,(size,size)) 99 | mat = Mp 100 | if(type=='var'): 101 | Mv = np.reshape(np.diag(var),(size,size)) 102 | mat = Mv 103 | 104 | if(ax is None): 105 | plotter = pylab 106 | xt = pylab.xticks 107 | yt = pylab.yticks 108 | else: 109 | plotter = ax 110 | xt = ax.set_xticks 111 | yt = ax.set_yticks 112 | 113 | img = plotter.pcolor(mat) 114 | 115 | plt.colorbar(img,ax=ax) 116 | xlabels = [str(np.around(min1 + j * inc1 * int(size / 5.0),decimals=2)) for j in range(6)] 117 | ylabels = [str(np.around(min2 + i * inc2 * int(size / 5.0),decimals=2)) for i in range(6)] 118 | xt([i*int(size / 5.0) for i in range(6)], xlabels) 119 | yt([i*int(size / 5.0) for i in range(6)], ylabels) 120 | 121 | if(ax is not None): 122 | ax.set_xticklabels(xlabels) 123 | ax.set_yticklabels(ylabels) 124 | return 125 | 126 | if(fname is None): 127 | pylab.show() 128 | else: 129 | pylab.savefig(fname,dpi=128) 130 | 131 | def regrets(y1,y2): 132 | y1 = np.array(y1) 133 | y2 = np.array(y2) 134 | maxs = np.reshape(np.max(np.concatenate((y1,y2),axis=1),axis=1),(y1.shape[0],1)) 135 | return (maxs - y1, maxs - y2) 136 | 137 | def rregrets(r1,r2,orig=True): 138 | if(orig): 139 | r1,r2 = regrets(r1,r2) 140 | sum1 = np.reshape(np.sum(r1,axis=1),(r1.shape[0],1)) 141 | sum2 = np.reshape(np.sum(r2,axis=1),(r2.shape[0],1)) 142 | rr1 = np.zeros(shape=r1.shape) 143 | rr2 = np.zeros(shape=r2.shape) 144 | rr1[:,[0]] = sum1 145 | rr2[:,[0]] = sum2 146 | for i in range(1,rr1.shape[1]): 147 | rr1[:,[i]] = rr1[:,[i-1]] - r1[:,[i-1]] 148 | rr2[:,[i]] = rr2[:,[i-1]] - r2[:,[i-1]] 149 | 150 | return rr1,rr2 151 | -------------------------------------------------------------------------------- /GP/SPGPmodel.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon Dec 7 21:39:18 2015 4 | 5 | @author: Mitch 6 | 
""" 7 | 8 | from GP import SPGP_train, SPGP_predict 9 | 10 | class SPGP(object): 11 | def __init__(self): 12 | self.X = [] 13 | self.Y = [] 14 | self.xb = [] 15 | self.m = 0 16 | self.dim = 0 17 | self.hyps = () 18 | 19 | def fit(self,X,Y,m): 20 | self.X = X 21 | self.Y = Y 22 | self.m = m 23 | self.dim = X.shape[1] 24 | (self.xb, self.hyps) = SPGP_train(X,Y,m) 25 | 26 | # too slow in practice 27 | def update(self, x_new, y_new): 28 | self.X.loc[len(self.X.index)] = x_new[0] 29 | self.Y[len(self.Y)] = y_new 30 | (self.xb, self.hyps) = SPGP_train(self.X,self.Y,min(self.m,self.X.shape[0])) 31 | 32 | 33 | def predict(self,X): 34 | return SPGP_predict(self.X, self.Y, self.xb, X, self.hyps) 35 | 36 | 37 | -------------------------------------------------------------------------------- /GP/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/GP/__init__.py -------------------------------------------------------------------------------- /GP/discrete_test_script.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Wed Jan 27 21:19:02 2016 4 | 5 | @author: Mitch 6 | 7 | Imports LCLS data and does trial optimization. 8 | """ 9 | 10 | import numpy as np 11 | import pandas as pd 12 | from GPtools import * 13 | import OnlineGP 14 | from BasicInterfaces import TestInterface, GPint 15 | from numpy.random import randn 16 | import BayesOpt_oldcopy as BOpt 17 | 18 | np.random.seed(1) 19 | 20 | # load data 21 | data_file = './d1.csv' 22 | data = pd.read_csv(data_file) 23 | X = data.iloc[:,:-1] 24 | y = data.iloc[:,-1] 25 | 26 | # bound the acquisition: typically leads to better performance 27 | mins = X.min(axis=0) 28 | maxs = X.max(axis=0) 29 | #bnds = tuple([(mins[i],maxs[i]) for i in range(len(mins))]) 30 | bnds = None 31 | 32 | # guess at hyperparameters for now 33 | data_hyps = (-2 * np.array(np.log((X.max() - X.min()) / 4.0),ndmin=2), np.log(y.var() + .1), np.log(y.var() / 4 + .01)) 34 | 35 | # set up run parameters 36 | runs = 1 37 | num_iter = 50 38 | num_train = 0 39 | numBV = 5 40 | noise = 0.0 41 | xi = .8 42 | 43 | # initialize for data collection 44 | model1 = range(runs) 45 | model2 = range(runs) 46 | opt1 = range(runs) 47 | opt2 = range(runs) 48 | res1 = range(runs) 49 | res2 = range(runs) 50 | 51 | 52 | for i in range(runs): 53 | model1[i] = OnlineGP.OGP(X.shape[1],data_hyps,weighted=False, maxBV=numBV, prmean=0) 54 | model2[i] = OnlineGP.OGP(X.shape[1],data_hyps, weighted=True, maxBV=numBV, prmean=0) 55 | 56 | # initial training 57 | train_data = data.copy() 58 | train_data.apply(np.random.shuffle,axis=0) 59 | train_data = train_data.iloc[:num_train,:] 60 | model1[i].fit(train_data.iloc[:,:-1],train_data.iloc[:,-1]) 61 | model2[i].fit(train_data.iloc[:,:-1],train_data.iloc[:,-1]) 62 | 63 | # mock machine interfaces 64 | intfc1 = TestInterface(vify(X,0)) 65 | intfc2 = TestInterface(vify(X,0)) 66 | 67 | # initialize optimizers 68 | opt1[i] = BOpt.BayesOpt(model1[i], intfc1, acq_func='testEI', xi=xi, bounds=bnds, alt_param=data) 69 | opt2[i] = BOpt.BayesOpt(model2[i], intfc2, acq_func='testEI', xi=xi, bounds=bnds, alt_param=data) 70 | 71 | # do optimization 72 | for j in range(num_iter): 73 | opt1[i].OptIter() 74 | opt2[i].OptIter() 75 | 76 | # collect data 77 | res1[i] = np.reshape(opt1[i].Y_obs[1:],(num_iter)) 78 | res2[i] = np.reshape(opt2[i].Y_obs[1:],(num_iter)) 79 | 80 
| # plot results 81 | errplot(res1,res2) 82 | -------------------------------------------------------------------------------- /GP/example_script.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Mon Jan 25 16:19:03 2016 4 | 5 | @author: Mitch 6 | 7 | Script to show optimization results on toy problem. 8 | 9 | Currently more sensitive to initial conditions than expected. Also currently 10 | uses hyperparameters that are clearly suboptimal - results are inconsistent 11 | based on initial sampling and whether bounds are given for acquisition. 12 | 13 | Probably could find a better toy problem that is nonnegative, which 14 | might imporove consistency. 15 | 16 | """ 17 | 18 | import numpy as np 19 | import pandas as pd 20 | from GPtools import * 21 | from OnlineGP import OGP 22 | from SPGPmodel import SPGP 23 | from BasicInterfaces import fint 24 | from numpy.random import rand 25 | import BayesOptimization as BOpt 26 | 27 | runs = 20 28 | num_iter = 60 29 | num_init = 10 30 | numBV = 5 31 | 32 | np.random.seed(1) 33 | 34 | # use this just to compute hyperparams 35 | fullGP = SPGP() 36 | rand_sample = (rand(100,1) - .5) * 4 37 | function = fint(rand_sample[0]) 38 | fullGP.fit(pd.DataFrame(rand_sample), function.f(rand_sample), 100) 39 | hyps = fullGP.hyps 40 | 41 | 42 | ## setup for data collection 43 | #res1 = range(runs) 44 | #res2 = range(runs) 45 | #x1 = range(runs) 46 | #x2 = range(runs) 47 | #model1 = range(runs) 48 | #model2 = range(runs) 49 | # 50 | #for j in range(runs): 51 | # 52 | # # generate initial data 53 | # init_x = (rand(1,1) - .5) * 4 54 | # p_data = np.zeros(shape=(num_init,2)) 55 | # int1 = fint(init_x) 56 | # int2 = fint(init_x) 57 | # 58 | # p_data[:,[0]] = (rand(num_init,1) - .5)*4 59 | # p_data[:,[1]] = int1.f(p_data[:,[0]]) 60 | # 61 | # 62 | # # initialize optimizers 63 | # #bnds = tuple([(-1.9,1.9)]) 64 | # bnds = None 65 | # model1[j] = OGP(1,hyps, maxBV=numBV) 66 | # opt1 = BOpt.BayesOpt(model1[j], int1, xi=0.01, acq_func='EI', bounds=bnds, prior_data=pd.DataFrame(p_data)) 67 | # model2[j] = OGP(1,hyps, weighted=True, maxBV=numBV) 68 | # opt2 = BOpt.BayesOpt(model2[j], int2, xi=0.01, acq_func='EI', bounds=bnds, prior_data=pd.DataFrame(p_data)) 69 | # 70 | # # iterate, do optimization, collect data 71 | # res1[j] = [] 72 | # res2[j] = [] 73 | # x1[j] = [] 74 | # x2[j] = [] 75 | # for i in range(num_iter): 76 | # opt1.OptIter() 77 | # #opt2.OptIter() 78 | # x1[j].append(opt1.acquire()) 79 | # #x2[j].append(opt2.acquire()) 80 | # res1[j].append(int1.f(x1[j][-1])[0][0]) 81 | # #res2[j].append(int1.f(x2[j][-1])[0][0]) 82 | # 83 | ## performance plot 84 | ##errplot(res1,res2) 85 | #errplot(res1) 86 | 87 | # can plot individual GP models as well: 88 | # BVplot(model1[0], function.f) 89 | 90 | 91 | -------------------------------------------------------------------------------- /GP/extractGP.py: -------------------------------------------------------------------------------- 1 | 2 | import OnlineGP 3 | import scipy.io as sio 4 | 5 | def getGP(fname): 6 | try: 7 | raw_data = sio.loadmat(fname) 8 | except: 9 | return 'Bad file.' 
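    # The .mat file is expected to hold a 'data' struct; its fields (C, alpha, KB, KBinv,
    # weighted, covar_params, BV, noise_var) are unpacked below and copied into a fresh OGP model.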
10 | data = raw_data['data'] 11 | 12 | print 'data = ',data 13 | 14 | C = data['C'][0][0] 15 | alpha = data['alpha'][0][0] 16 | KB = data['KB'][0][0] 17 | KBinv = data['KBinv'][0][0] 18 | w = data['weighted'][0][0] 19 | cov = data['covar_params'][0][0][0] 20 | cov = (cov[0],cov[1]) 21 | BV = data['BV'][0][0] 22 | noise = data['noise_var'][0][0] 23 | 24 | size = BV.shape 25 | gp = OnlineGP.OGP(size[1],(cov[0], cov[1], noise),maxBV=size[0],weighted=w) 26 | gp.noise_var = noise 27 | gp.alpha = alpha 28 | gp.C = C 29 | gp.KB = KB 30 | gp.KBinv = KBinv 31 | gp.BV = BV 32 | 33 | return gp 34 | -------------------------------------------------------------------------------- /GP/gaussian_process_sklearn.py: -------------------------------------------------------------------------------- 1 | """ 2 | Written by S. Tomin, 2017 3 | """ 4 | import numpy as np 5 | from matplotlib import pyplot as plt 6 | from sklearn.gaussian_process import GaussianProcessRegressor 7 | from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C 8 | from scipy import optimize 9 | from copy import deepcopy 10 | import time 11 | import matplotlib.cm as cm 12 | 13 | class GP: 14 | def __init__(self): 15 | self.x_search = np.array([]) 16 | self.x_obs = np.array([]) 17 | self.y_obs = np.array([]) 18 | self.y_sigma_obs = np.array([]) 19 | self.y_pred = np.array([]) 20 | self.sigma_y = 0 21 | 22 | # RBF kernel 23 | self.rbf_length_scale = 1 24 | self.rbf_length_scale_bounds = (0.01, 100) 25 | # ConstantKernel 26 | self.ck_const_value = 1.0 27 | self.ck_const_value_bounds = (1e-05, 100000.0) 28 | self.n_restarts_optimizer = 10 29 | self.max_iter = 40 30 | self.pen_max = 100 31 | self.ytol = 0.001 32 | self.xtol = 0.001 33 | self.opt_ctrl = None 34 | 35 | def append_new_data(self, x_new, y_obs, sigma_y_obs): 36 | self.x_obs = np.append(self.x_obs, [x_new], axis=0) 37 | self.y_obs = np.append(self.y_obs, y_obs) 38 | self.y_sigma_obs = np.append(self.y_sigma_obs, sigma_y_obs) 39 | 40 | def fit(self): 41 | """ 42 | RBF(length_scale=1.0, length_scale_boun ds=(1e-05, 100000.0)) 43 | k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2) 44 | :return: 45 | """ 46 | # Instanciate a Gaussian Process model 47 | #kernel = ConstantKernel(self.ck_const_value, self.ck_const_value_bounds)\ 48 | # * RBF(self.rbf_length_scale, self.rbf_length_scale_bounds) 49 | #kernel = ConstantKernel(self.ck_const_value, self.ck_const_value_bounds)* RBF(self.rbf_length_scale, self.rbf_length_scale_bounds) 50 | kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2)) 51 | # Instanciate a Gaussian Process model 52 | if self.sigma_y != 0: 53 | self.alpha = (self.y_sigma_obs / self.y_obs) ** 2 54 | else: 55 | self.alpha = 1e-10 56 | print('alpha is', self.alpha) 57 | #self.gp = GaussianProcessRegressor(kernel=kernel, alpha=self.alpha, 58 | #n_restarts_optimizer=self.n_restarts_optimizer) 59 | self.gp = GaussianProcessRegressor(kernel=kernel, alpha = self.alpha, n_restarts_optimizer=9) 60 | # Fit to data using Maximum Likelihood Estimation of the parameters 61 | #print('self.x and self.y', self.x_obs, self.y_obs) 62 | print('trying to fit', self.x_obs, self.y_obs) 63 | self.gp.fit(self.x_obs, self.y_obs) 64 | print('success') 65 | 66 | def acquire_simplex(self): 67 | # Make the prediction on the meshed x-axis (ask for MSE as well) 68 | print('acquire simplex') 69 | def func(x): 70 | for i, xi in enumerate(x): 71 | if self.x_search[0][i] > xi or xi > self.x_search[-1][i]: 72 | print("exceed limits ") 73 | return self.pen_max 74 | 75 | y_pred, 
sigma = self.gp.predict(np.atleast_2d(x), return_std=True) 76 | self.sigma = sigma 77 | return y_pred 78 | 79 | y_pred, sigma = self.gp.predict(self.x_obs, return_std=True) 80 | x = self.x_obs[np.argmin(y_pred)] 81 | res = optimize.fmin(func, x) 82 | return res 83 | 84 | def acquire(self): 85 | # Make the prediction on the meshed x-axis (ask for MSE as well) 86 | y_pred, sigma = self.gp.predict(self.x_search, return_std=True) 87 | x = self.x_search[np.argmin(y_pred)] 88 | return x 89 | 90 | def minimize(self, error_func, x): 91 | # weighting for exploration vs exploitation in the GP at the end of scan, alpha array goes from 1 to zero 92 | print('making it to fit') 93 | self.fit() 94 | print('made it to iteration') 95 | for i in range(self.max_iter): 96 | # get next point to try using acquisition function 97 | if self.opt_ctrl != None and self.opt_ctrl.kill == True: 98 | print('GP: Killed from external process') 99 | break 100 | print('made it further') 101 | start = time.time() 102 | x_next = self.acquire() 103 | print("acquire ", start - time.time(), " sec") 104 | 105 | y_new = error_func(x_next.flatten()) 106 | 107 | self.append_new_data(x_next, y_new, sigma_y_obs=self.sigma_y) 108 | # update the model (may want to add noise if using testEI) 109 | self.fit() 110 | if i>3 and np.linalg.norm((self.x_obs[-3] - self.x_obs[-1])) <= self.xtol: 111 | break 112 | return self.x_obs[-1] 113 | -------------------------------------------------------------------------------- /GP/heatmap.py: -------------------------------------------------------------------------------- 1 | 2 | import matplotlib 3 | import numpy as np 4 | import time 5 | from datetime import datetime as dt 6 | import os 7 | import matplotlib.cm as cm 8 | import matplotlib.mlab as mlab 9 | import matplotlib.pyplot as plt 10 | 11 | #matplotlib.rcParams['xtick.direction'] = 'out' 12 | #matplotlib.rcParams['ytick.direction'] = 'out' 13 | 14 | 15 | def plotheatmap(function, fargs, rangex, rangey, ngrid=25, xlabel='Device 1', ylabel='Device 2', description='', series = None): 16 | #function - pointer to the function to evaluate 17 | #rangex & rangey - [xlow,xhigh] 18 | #ngrid - number of points to plot 19 | #xlabel & ylabel - axes labels 20 | #description - plot title 21 | #series - list of [x,y] pairs to plot showing the optimization path 22 | 23 | # evaluate function 24 | xs = np.linspace(min(rangex),max(rangex),ngrid) 25 | ys = np.linspace(min(rangey),max(rangey),ngrid) 26 | 27 | ## for vectorizable functions 28 | #xys = np.zeros([ngrid,ngrid,2]) 29 | #for i in range(ngrid): 30 | #for j in range(ngrid): 31 | #xys[i][j] = [xs[i],ys[j]] 32 | #xys = xys.reshape([ngrid*ngrid,2]) 33 | #zs = function(xys,*fargs) 34 | #Z = zs.reshape([ngrid,ngrid]) 35 | 36 | # for non-vectorizable functions 37 | Z = np.zeros([ngrid,ngrid]) 38 | for i in range(ngrid): 39 | for j in range(ngrid): 40 | Z[i][j] = function(np.array([[xs[i],ys[j]]],ndmin=2),*fargs)[0] 41 | 42 | X, Y = np.meshgrid(xs, ys) 43 | 44 | # bug in matplotlib prevents plotting Z with all zeros so perturb if this is the case 45 | 46 | if(np.sum(Z)==0): 47 | Zshape = np.shape(Z) 48 | Z = Z + 1.e-6 * np.random.randn(Zshape[0], Zshape[1]) 49 | print ('WARNING: z-values are all zero so adding a small random field to it') 50 | 51 | # Create a simple contour plot with labels using default colors. 
The 52 | # inline argument to clabel will control whether the labels are draw 53 | # over the line segments of the contour, removing the lines beneath 54 | # the label 55 | plt.figure() 56 | #print 'np.shape(X) = ',np.shape(X) 57 | #print 'np.shape(Y) = ',np.shape(Y) 58 | #print 'np.shape(Z.T) = ',np.shape(Z.T) 59 | #CS = plt.contour(X, Y, Z.T, cmap='viridis') 60 | CS = plt.contour(X, Y, Z.T, cmap='jet') 61 | plt.clabel(CS, inline=1, fontsize=10) 62 | plt.xlabel(xlabel) 63 | plt.ylabel(ylabel) 64 | plt.title(description) 65 | #plt.show() # doesn't work with ocelot's threading 66 | 67 | # Ocelot path series 68 | try: 69 | if series is not None: 70 | series = np.array(series) 71 | #print 'series = ', series 72 | ptsx = series[:,0] 73 | ptsy = series[:,1] 74 | #print 'ptsx = ', ptsx 75 | #print 'ptsy = ', ptsy 76 | plt.plot(ptsx, ptsy, 'k--') 77 | plt.plot(ptsx, ptsy, 'k.', ms=8) 78 | except: 79 | pass 80 | 81 | # plot path 82 | try: # if running under a profile, save to profile directory 83 | #username = os.environ['PHYSICS_USER'] 84 | #if username == 'none': 85 | #username = 'Ocelot' 86 | #basepath = '/home/physics/' + username + '/OcelotPlots/' 87 | 88 | # save to a directory under the user's home directory 89 | homepath = os.environ['HOME'] 90 | basepath = homepath + '/ocelot/plots/' 91 | 92 | year = str(dt.fromtimestamp(time.time()).strftime('%Y')) 93 | month = str(dt.fromtimestamp(time.time()).strftime('%m')) 94 | day = str(dt.fromtimestamp(time.time()).strftime('%d')) 95 | basepath = str(basepath+year+'/'+year+'-'+month+'/'+year+'-'+month+'-'+day+'/') 96 | 97 | except: 98 | basepath = os.environ['PWD']+'/' 99 | 100 | try: 101 | os.makedirs(basepath) # make it if it doesn't exist 102 | except: 103 | pass 104 | 105 | # plot file path 106 | timestr = time.strftime("%Y%m%d-%H%M%S") + str(round(time.time()%1*1000)/1000)[1:] 107 | fpath = basepath + 'heatmap-' + function.__name__ + '-' + timestr + '.png' 108 | 109 | # save plot 110 | plt.savefig(fpath, bbox_inches='tight') 111 | plt.close('all') # close all open figures to prevent memory leaks 112 | print('Saved contour plot for ' + function.__name__ + ' to ' + fpath) 113 | 114 | #ax1.scatter(ptsx,ptsy,s=50,c=data.iloc[:moment+1,-1]) 115 | #ax1.set_xlim(axes[:2]) 116 | #ax1.set_ylim(axes[2:]) 117 | -------------------------------------------------------------------------------- /GP/lcls_opt_script.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Wed Jan 27 21:19:02 2016 4 | 5 | @author: Mitch 6 | 7 | Imports LCLS data and does trial optimization with unweighted and weighted 8 | online GPs. 
9 | """ 10 | 11 | import numpy as np 12 | import pandas as pd 13 | from GPtools import * 14 | import OnlineGP 15 | import SPGPmodel 16 | from BasicInterfaces import TestInterface, GPint 17 | from numpy.random import randn 18 | import BayesOptimization as BOpt 19 | 20 | def run(): 21 | 22 | np.random.seed(1) 23 | 24 | # load data 25 | data = pd.read_csv('../data.csv') 26 | dt = data[(data.iloc[:,1] > 0.2) & (data.iloc[:,1] < 6.0)] 27 | ctrl = [x for x in data.columns if x[-5:] == 'BCTRL'] 28 | X = dt[ctrl] 29 | y = dt.iloc[:,1] 30 | 31 | # clear unneeded stuff from memory 32 | del data 33 | 34 | # set up data from a given event 35 | event_energy = 11.45 36 | Xsm = X.loc[X.iloc[:,0]==event_energy,:] 37 | Xsm = Xsm.iloc[50:2050,:] 38 | Ysm = np.array(dt.loc[X.iloc[:,0]==event_energy,dt.columns[1]]) 39 | Ysm = Ysm[50:2050] 40 | XYsm = Xsm.copy() 41 | XYsm['y'] = Ysm 42 | mins = Xsm.min(axis=0) 43 | maxs = Xsm.max(axis=0) 44 | 45 | # bound the acquisition: leads to better performance and lessens 46 | # the improvements from weighting 47 | #bnds = tuple([(mins[i],maxs[i]) for i in range(len(mins))]) 48 | bnds = None 49 | 50 | # build a sparse GP and optimize its hyperparameters to use for online GP 51 | hprior = SPGPmodel.SPGP() 52 | hprior.fit(Xsm, Ysm, 300) 53 | data_hyps = hprior.hyps 54 | 55 | # train truth model, the high-res GP that stands in for a real-world machine 56 | prior = OnlineGP.OGP(17, data_hyps, weighted=False, maxBV=600, prmean=1) 57 | prior.fit(Xsm,Ysm) 58 | 59 | # set up run parameters 60 | runs = 40 61 | num_iter = 60 62 | numBV = 30 63 | noise = 0.0 64 | num_train = 100 65 | 66 | # initialize for data collection 67 | model1 = range(runs) 68 | model2 = range(runs) 69 | opt1 = range(runs) 70 | opt2 = range(runs) 71 | res1 = range(runs) 72 | res2 = range(runs) 73 | preds1 = range(runs) 74 | preds2 = range(runs) 75 | 76 | 77 | for i in range(runs): 78 | model1[i] = OnlineGP.OGP(17,data_hyps,weighted=False, maxBV=numBV, prmean=1)# prmean=prior_func, prmeanp=(mod,poly)) 79 | model2[i] = OnlineGP.OGP(17,data_hyps, weighted=True, maxBV=numBV, prmean=1) 80 | 81 | # mock machine interfaces using the big GP to supply y-values 82 | intfc1 = GPint(vify(Xsm,0),prior) 83 | intfc2 = GPint(vify(Xsm,0),prior) 84 | 85 | # need initial training or complex prior mean function to guide optimization 86 | train = XYsm.iloc[-1000:].sample(n=num_train) 87 | train.iloc[:,-1] += noise*randn(num_train) 88 | model1[i].fit(train.iloc[:,:-1],np.array(train.iloc[:,-1])) 89 | model2[i].fit(train.iloc[:,:-1],np.array(train.iloc[:,-1])) 90 | 91 | # initialize optimizers 92 | opt1[i] = BOpt.BayesOpt(model1[i], intfc1, acq_func='EI', xi=0, bounds=bnds)#, alt_param=XYsm) 93 | opt2[i] = BOpt.BayesOpt(model2[i], intfc2, acq_func='EI', xi=0, bounds=bnds)#, alt_param=XYsm) 94 | 95 | # do optimization 96 | for j in range(num_iter): 97 | opt1[i].OptIter() 98 | opt2[i].OptIter() 99 | 100 | # collect data 101 | res1[i] = np.reshape(opt1[i].Y_obs[1:],(num_iter)) 102 | res2[i] = np.reshape(opt2[i].Y_obs[1:],(num_iter)) 103 | preds1[i] = opt1[i].model.predict(np.array(Xsm))[0] 104 | preds2[i] = opt2[i].model.predict(np.array(Xsm))[0] 105 | 106 | # plot results 107 | 108 | if __name__ == "__main__": 109 | 110 | errplot(res1,res2) 111 | -------------------------------------------------------------------------------- /GP/parallelbasinhopping.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | from scipy.optimize import basinhopping 4 | import multiprocessing as 
mp 5 | 6 | # handle 'IOError: [Errno 4] Interrupted system call' errors from multiprocessing.Queue.get 7 | #https://stackoverflow.com/questions/14136195/what-is-the-proper-way-to-handle-in-python-ioerror-errno-4-interrupted-syst 8 | import errno 9 | def my_queue_get(queue, block=True, timeout=None): 10 | while True: 11 | try: 12 | return queue.get(block, timeout) 13 | except IOError, e: 14 | if e.errno != errno.EINTR: 15 | raise 16 | # Now replace instances of queue.get() with my_queue_get(queue), with other 17 | # parameters passed as usual. 18 | 19 | # see here https://eli.thegreenplace.net/2012/01/16/python-parallelizing-cpu-bound-tasks-with-multiprocessing/ 20 | # and here https://stackoverflow.com/questions/37060091/multiprocessing-inside-function 21 | 22 | def bworker(f,x0,bkwargs,out_q): 23 | # worker invoked in a process puts the results in the output queue out_q 24 | res = basinhopping(f, x0, **bkwargs) 25 | out_q.put([[res.x, res.fun[0][0]]]) 26 | 27 | # parallelize minimizations using different starting positions using multiprocessing, scipy.optimize.minimize 28 | def parallelbasinhopping(f,x0s,bkwargs): 29 | # f is fcn to minimize 30 | # x0s are positions to start search from 31 | # fargs are arguments to pass to f 32 | # margs are arguments to pass to scipy.optimize.minimize 33 | 34 | 35 | # Each process will get a queue to put its result in 36 | out_q = mp.Queue() 37 | 38 | # arguments to loop over 39 | args = [(f,x0,bkwargs,out_q) for x0 in x0s] 40 | 41 | # https://stackoverflow.com/questions/9786102/how-do-i-parallelize-a-simple-python-loop#9786225 42 | # also could try concurrent futures 43 | # import multiprocessing 44 | # pool = multiprocessing.Pool() 45 | # res = np.array(pool.map(minimizeone, args)) 46 | # res = np.array(pool.map(l, range(10))) 47 | 48 | # seems like this maybe be needed 49 | # https://stackoverflow.com/questions/37060091/multiprocessing-inside-function 50 | 51 | nprocs = len(x0s) 52 | procs = [] 53 | 54 | for i in range(nprocs): 55 | p = mp.Process( 56 | target=bworker, 57 | args=args[i]) 58 | procs.append(p) 59 | p.start() 60 | 61 | res = []; 62 | for i in range(nprocs): 63 | #res += out_q.get() 64 | res += my_queue_get(out_q) 65 | 66 | for p in procs: 67 | p.join() 68 | 69 | res = np.array(res) 70 | #print 'res = ', res 71 | res = res[res[:,1]==np.min(res[:,1])][0] 72 | #print 'selected min is ',res 73 | res = np.array(res[0]) 74 | 75 | return res 76 | -------------------------------------------------------------------------------- /RCDS/test1.py: -------------------------------------------------------------------------------- 1 | from rcdsClass import * 2 | 3 | def func_obj(p): 4 | '''Objective function for test 5 | Input: 6 | p : a column vector 7 | Output: 8 | obj : an floating number 9 | ''' 10 | 11 | obj = 0 12 | for ii in range(len(p)-1): 13 | obj -= 10*math.exp(-0.2*math.sqrt(p[ii]**2+p[ii+1]**2)) 14 | 15 | return obj 16 | # obj += np.random.randn()*g_noise 17 | 18 | def fuc_test0(x): 19 | return np.linalg.norm(x) 20 | 21 | def test0(): 22 | Nvar = 6 23 | g_vrange = np.matrix(np.ones((Nvar,2)))*150 24 | g_vrange[:,0] *= -1 25 | 26 | p0 = np.matrix(np.ones([Nvar,1]))*10.0 27 | x0 = np.divide(p0-g_vrange[:,0],g_vrange[:,1]-g_vrange[:,0]) 28 | 29 | y0 = fuc_test0(x0) 30 | print('x0',x0) 31 | print('y0',y0) 32 | 33 | 34 | step = 0.01 35 | g_noise = 0.001 36 | g_cnt = 0 37 | g_data = np.zeros([1,Nvar+2]) 38 | Imat = np.matrix(np.identity(Nvar)) 39 | 40 | rcds = RCDS(func_obj, g_noise, g_cnt, Nvar, g_vrange, g_data, Imat) 41 | #rcds = 
RCDS(fuc_test0, g_noise, g_cnt, Nvar, g_vrange, g_data, Imat)
42 | (xm,fm,nf)=rcds.powellmain(x0,step,Imat,maxIt=1000,maxEval=10000)
43 | print('start',x0)
44 | print('end',xm)
45 | print('start',y0)
46 | print('end',fm)
47 | print('end',fuc_test0(xm))
48 |
49 | test0()
50 |
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # An Introduction to Ocelot Optimizer
2 |
3 | Ocelot Optimizer is a platform for automated optimization of accelerator performance.
4 |
5 | It is an open source project developed by physicists from [The European XFEL](http://www.xfel.eu/), [DESY](http://www.desy.de/), and [SLAC](https://www6.slac.stanford.edu).
6 |
7 | ## Use cases
8 |
9 | The Optimizer is used for many different types of optimization.
10 |
11 | * **FEL facilities:**
12 |
13 |   - FEL pulse energy maximization with:
14 |     - Phase-shifters (EuXFEL)
15 |     - Orbit in low energy sections (EuXFEL)
16 |     - Matching quads (LCLS & EuXFEL)
17 |     - RF settings (EuXFEL)
18 |     - Launch orbit and orbit inside an undulator (EuXFEL)
19 |   - Local dispersion correction (EuXFEL & FLASH)
20 |   - HOM signal minimization in cavities (FLASH)
21 | * **Storage rings (tests):**
22 |   - Injection efficiency optimization (Kurchatov Institute)
23 |   - Beam lifetime (test at BESSY-II)
24 |
25 | ## Ocelot schematic
26 |
27 | ![](docs/readme/ocelot_schematic.png)
28 |
29 |
30 | From the beginning, Ocelot Optimizer was used by a few facilities (EuXFEL, LCLS, FLASH). To keep the tool compatible across them, an abstraction layer called the Machine Interface was developed. The Machine Interface communicates with an accelerator control system and contains all machine-related specifics.
31 | The Algorithm API is designed as a layer of abstraction between the Optimizer and the optimization methods.
32 |
33 | #### Ocelot Optimizer includes:
34 | * **Machine Interfaces:**
35 |   - XFELMachineInterface
36 |   - LCLSMachineInterface
37 |   - FLASHMachineInterface
38 |   - BESSYMachineInterface
39 |   - MultinormalInterface (interface for algorithm testing)
40 | * **Optimization Methods:**
41 |   - [Simplex (Nelder–Mead method)](https://en.wikipedia.org/wiki/Nelder–Mead_method)
42 |   - [Bayesian optimization w/ a GP](http://accelconf.web.cern.ch/accelconf/ipac2016/papers/wepow055.pdf)
43 |   - [RCDS](https://www.slac.stanford.edu/pubs/slacpubs/15250/slac-pub-15414.pdf)
44 |   - [Extremum Seeking](https://www.sciencedirect.com/science/article/pii/S0005109816300553)
45 |
46 |
47 | ## Graphical User Interface
48 |
49 | Ocelot Optimizer was designed to cover as many optimization problems as possible, especially during accelerator commissioning.
50 | We tried to build a GUI that reflects that goal.
With the GUI you can:
51 | * Select different optimizers (Simplex, GP, ES, …)
52 | * Select any group of devices
53 | * Craft your objective function
54 |   - Simple PV
55 |   - Do math on up to 5 PVs
56 |   - Statistics: mean, stdev, quantiles
57 |   - Write a python script (loaded/edited via the GUI)
58 |   - Option to use a simulated objective to test optimization methods
59 | * Data browser - choose any point from the history of the last optimization
60 | * Save predefined settings & routines
61 |
62 |
63 | ![](docs/readme/ocelot_anim.gif)
64 |
65 | ## How to get started
66 |
67 | * Download @GitHub: https://github.com/ocelot-collab/optimizer
68 |
69 | * Try the “simulation” interface and test different optimization methods:
70 | >> python generic_optim.py MultinormalInterface
71 |
72 | * To ask for help:
73 | >> python generic_optim.py --help
74 |
75 | * Edit the template machine interface to map it to your control system:
76 |   - [DemoInterface](mint/demo/demo_interface.py)
77 | >> python generic_optim.py MultinormalInterface
78 |
79 | ```python
80 | class DemoInterface(MachineInterface):
81 |     name = 'DemoInterface'
82 |
83 |     def __init__(self, args=None):
84 |         super(DemoInterface, self).__init__(args=args)
85 |
86 |         # self.config_dir is the path to a directory where a default config (the tool state) will be saved
87 |         # self.config_dir = "/parameters/" is the default path in the parent class MachineInterface
88 |         self.config_dir = os.path.join(self.config_dir, "demo")  # /parameters/demo
89 |
90 |         # self.path2jsondir is the path to a folder where optimization histories will be saved in JSON format
91 |         # by default self.path2jsondir points to a folder on the same level;
92 |         # the folder will be created automatically
93 |
94 |         # flag from LCLSInterface which, when True, does not allow the Optimizer to write to the control system
95 |         self.read_only = False
96 |
97 |     def get_value(self, channel):
98 |         print("Called get_value for channel: {}.".format(channel))
99 |         return random.random()
100 |
101 |     def set_value(self, channel, val):
102 |         print("Called set_value for channel: {}, with value: {}".format(channel, val))
103 |
104 | ```
105 |
106 | As a second step we recommend having a look at [XFELMachineInterface](mint/xfel/xfel_interface.py)
107 | and also [TestMachineInterface](mint/xfel/xfel_interface.py),
108 | which you can run using:
109 |
110 | >> python generic_optim.py --devmode
111 |
112 |
113 | See the parent class [MachineInterface](mint/opt_objects.py) for more configurable options.
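The GUI is not required: the same machinery can be driven from a script. Below is a condensed sketch of the sequence API, based on [cli_examples/optimization_sequence.py](cli_examples/optimization_sequence.py) and the simulated `TestMachineInterface`; the toy target class, the single PV and the numeric settings are purely illustrative.

```python
# Condensed sketch of a scripted run (see cli_examples/optimization_sequence.py
# for the complete version, including machine protection and plotting).
import numpy as np

from mint.opt_objects import *
from mint.xfel.xfel_interface import *
from mint.mint import *
from op_methods.simplex import *

mi = TestMachineInterface(args=None)      # simulated machine interface

class ToyTarget(Target):                  # illustrative objective: peaks when all devices sit at 1.0
    def get_value(self):
        values = np.array([dev.get_value() for dev in self.devices])
        return np.sum(np.exp(-np.power((values - np.ones_like(values)), 2) / 5.))

target = ToyTarget()
target.mi = mi
target.nreadings = 35                     # readings averaged per measurement
target.interval = 0.01                    # seconds between readings

devices = []
for pv in ["XFEL.FEL/UNDULATOR.SASE2/CBY.CELL16.SA2/FIELD.OFFSET"]:
    d = mi.device_factory(pv=pv)
    d.mi = mi
    d.get_limits = lambda: [-1, 1]
    d.istep = 0.2                         # initial step
    devices.append(d)

minimizer = Simplex()
minimizer.max_iter = 35

opt = Optimizer()
opt.maximization = True
opt.timeout = 0.1                         # pause between setting devices and reading the objective
opt.opt_ctrl = OptControl()
opt.opt_ctrl.m_status = MachineStatus()   # machine protection left unconfigured in this sketch
opt.minimizer = minimizer

# a sequence is a list of Actions; func=None uses the Optimizer's built-in target maximization
opt.eval([Action(func=None, args=[target, devices])])
```

The full example in `cli_examples/optimization_sequence.py` additionally chains two `Action`s into one sequence, attaches an alarm device through `MachineStatus` for machine protection, and plots the objective and device histories after the run.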
-------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function 2 | 3 | __version__ = "v1.1" 4 | 5 | __all__ = ['Action', 'OptControl', 6 | 7 | 'Optimizer', 'Minimizer', 8 | 9 | 'Device', 'MachineInterface', 10 | 11 | 'MachineStatus',"Target", 12 | 13 | "Simplex", "SimplexNorm"] 14 | 15 | 16 | 17 | from mint.mint import * 18 | 19 | from mint.opt_objects import * 20 | 21 | from op_methods.simplex import * 22 | 23 | -------------------------------------------------------------------------------- /cli_examples/optimization_sequence.py: -------------------------------------------------------------------------------- 1 | #from optimizer import * 2 | import sys 3 | from mint.opt_objects import * 4 | from mint.xfel.xfel_interface import * 5 | from mint.mint import * 6 | from op_methods.simplex import * 7 | import matplotlib.pyplot as plt 8 | import numpy as np 9 | 10 | import logging 11 | logging.basicConfig(level=logging.DEBUG) 12 | 13 | # init machine interface 14 | mi = TestMachineInterface(args=None) 15 | 16 | # timeout between device setting and signal reading 17 | timeout = 0.1 18 | 19 | # type of optimization 20 | maximization = True 21 | 22 | # max iteration 23 | max_iter = 35 24 | 25 | 26 | # redefine method get_value for Target. 27 | """ 28 | # it is a real example which will work with TestMachineInterface and XFELMachineInterface 29 | def get_value(): 30 | val = mi.get_value("XFEL.FEL/XGM.PREPROCESSING/XGM.2595.T6.CH0/RESULT.TD") 31 | return val 32 | 33 | target = Target() 34 | target.mi = mi 35 | target.nreadings = 35 # number of readings 36 | target.interval = 0.01 # in [s] between readings 37 | target.get_value = get_value 38 | 39 | target2 = Target() 40 | target.mi = mi 41 | target2.nreadings = 35 42 | target2.interval = 0.01 # in sec between readings 43 | target2.get_value = get_value 44 | """ 45 | # to be able to run "real" optimization we can use simple test target class 46 | class Target_test(Target): 47 | def __init__(self, mi=None, eid=None): 48 | super(Target_test, self).__init__(eid=eid) 49 | self.mi = mi 50 | 51 | def get_value(self): 52 | values = np.array([dev.get_value() for dev in self.devices]) 53 | return np.sum(np.exp(-np.power((values - np.ones_like(values)), 2) / 5.)) 54 | 55 | target = Target_test() 56 | target.mi = mi 57 | target.nreadings = 35 # number of readings 58 | target.interval = 0.01 # in [s] between readings 59 | 60 | target2 = Target_test() 61 | target.mi = mi 62 | target2.nreadings = 35 63 | target2.interval = 0.01 # in sec between readings 64 | 65 | 66 | # create devices for ACTION 1 67 | pvs = [ 68 | "XFEL.FEL/UNDULATOR.SASE2/CBY.CELL16.SA2/FIELD.OFFSET", 69 | "XFEL.FEL/UNDULATOR.SASE2/CAX.CELL17.SA2/FIELD.OFFSET", 70 | "XFEL.FEL/UNDULATOR.SASE2/CBX.CELL17.SA2/FIELD.OFFSET", 71 | "XFEL.FEL/UNDULATOR.SASE2/CAY.CELL17.SA2/FIELD.OFFSET", ] 72 | 73 | devices1 = [] 74 | ivalues = [] 75 | dev_steps = [] 76 | for pv in pvs: 77 | d = mi.device_factory(pv=pv) 78 | d.mi = mi 79 | d.get_limits = lambda : [-1, 1] 80 | d.istep = 0.2 # initial step 81 | devices1.append(d) 82 | 83 | # if Action's argument "func = None" optimization uses function Optimizer.max_target_func() 84 | a1 = Action(func=None, args=[target, devices1]) 85 | # for logging 86 | a1.finalize = lambda : mi.write_data(method_name="simplex", objective_func=target, devices=devices1, 87 | maximization=maximization, 
max_iter=max_iter) 88 | 89 | 90 | # create devices for ACTION 2 91 | pvs = [ 92 | "XFEL.FEL/UNDULATOR.SASE2/CBY.CELL19.SA2/FIELD.OFFSET", 93 | "XFEL.FEL/UNDULATOR.SASE2/CAX.CELL20.SA2/FIELD.OFFSET", 94 | "XFEL.FEL/UNDULATOR.SASE2/CBX.CELL20.SA2/FIELD.OFFSET", 95 | "XFEL.FEL/UNDULATOR.SASE2/CAY.CELL20.SA2/FIELD.OFFSET" ] 96 | 97 | devices2 = [] 98 | for pv in pvs: 99 | d = mi.device_factory(pv=pv) 100 | d.mi = mi 101 | d.get_limits = lambda : [-1, 1] 102 | d.istep = 0.2 # initial step 103 | devices2.append(d) 104 | 105 | 106 | a2 = Action(func=None, args=[target2, devices2]) 107 | a2.finalize = lambda : mi.write_data("simplex", target2, devices2, maximization, max_iter) 108 | 109 | # sequence of optimizations 110 | seq = [a1, a2] 111 | 112 | # set Machine protection 113 | alarm_dev = mi.device_factory(pv="XFEL.DIAG/TOROID/TORA.1865.TL/CHARGE.ALL") 114 | alarm_dev.mi = mi 115 | 116 | m_status = MachineStatus() 117 | m_status.alarm_device = alarm_dev 118 | m_status.alarm_max = 1 119 | m_status.alarm_min = -1 120 | 121 | 122 | # init Minimizer 123 | minimizer = Simplex() 124 | minimizer.max_iter = max_iter 125 | 126 | # init Optimizer 127 | opt = Optimizer() 128 | opt.maximization = maximization 129 | opt.timeout = timeout 130 | 131 | opt.opt_ctrl = OptControl() 132 | opt.opt_ctrl.m_status = m_status 133 | opt.opt_ctrl.timeout = 5 # if machine runs again after interruption wait 5 sec before continue optimization 134 | opt.minimizer = minimizer 135 | 136 | # run optimizations 137 | opt.eval(seq) 138 | 139 | #%% 140 | # plotting 141 | t = np.array(target.times) - target.times[0] 142 | v = target.values 143 | v_std = target.std_dev 144 | 145 | t2 = np.array(target2.times) - target2.times[0] + (target.times[-1] - target.times[0]) 146 | v2 = target2.values 147 | v_std2 = target2.std_dev 148 | 149 | plt.subplot(311) 150 | plt.plot(t, v, lw=2, label="obj func") 151 | plt.plot(t2, v2, lw=2, label="obj func") 152 | plt.legend() 153 | 154 | plt.subplot(312) 155 | plt.plot(t, v_std, lw=2, label="obj func std") 156 | plt.plot(t2, v_std2, lw=2, label="obj func std") 157 | plt.legend() 158 | 159 | plt.subplot(313) 160 | for dev in devices1: 161 | t = np.array(dev.times) - dev.times[0] 162 | v = dev.values 163 | plt.plot(t, v, label=dev.id) 164 | 165 | for dev in devices2: 166 | t = np.array(dev.times) - dev.times[0] + devices1[0].times[-1] - devices1[0].times[0] 167 | v = dev.values 168 | plt.plot(t, v, label=dev.id) 169 | plt.legend() 170 | plt.show() 171 | #mi.write_data("simplex", target, devices, opt.maximization, minimizer.max_iter) -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = OcelotOptimizer 8 | SOURCEDIR = ./source 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
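# For example, "make html" expands to `sphinx-build -M html ./source _build`;
# extra sphinx-build options can be passed via the O variable, e.g. `make html O="-W"`.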
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/_build/doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/doctrees/environment.pickle -------------------------------------------------------------------------------- /docs/_build/doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/doctrees/index.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/intro.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/doctrees/intro.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/modules.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/doctrees/modules.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/optimizer.GP.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/doctrees/optimizer.GP.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/optimizer.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/doctrees/optimizer.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/optimizer.mint.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/doctrees/optimizer.mint.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/optimizer.resetpanel.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/doctrees/optimizer.resetpanel.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/status.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/doctrees/status.doctree -------------------------------------------------------------------------------- /docs/_build/doctrees/structure.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/doctrees/structure.doctree 
-------------------------------------------------------------------------------- /docs/_build/doctrees/usage.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/doctrees/usage.doctree -------------------------------------------------------------------------------- /docs/_build/html/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 3 | config: 8e59aa1e511214ac7196372553fcd65f 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /docs/_build/html/_images/ocelot_dev_panel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_images/ocelot_dev_panel.jpg -------------------------------------------------------------------------------- /docs/_build/html/_images/ocelot_options.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_images/ocelot_options.png -------------------------------------------------------------------------------- /docs/_build/html/_images/ocelot_plots.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_images/ocelot_plots.png -------------------------------------------------------------------------------- /docs/_build/html/_images/ocelot_resetpanel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_images/ocelot_resetpanel.png -------------------------------------------------------------------------------- /docs/_build/html/_images/ocelot_savefile.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_images/ocelot_savefile.png -------------------------------------------------------------------------------- /docs/_build/html/_images/ocelot_scan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_images/ocelot_scan.png -------------------------------------------------------------------------------- /docs/_build/html/_modules/index.html: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Overview: module code — OcelotOptimizer 1.0 documentation 10 | 11 | 12 | 13 | 14 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 43 | 44 |
45 | 69 | 83 |
84 |
85 | 97 | 101 | 102 |
-------------------------------------------------------------------------------- /docs/_build/html/_sources/index.rst.txt: --------------------------------------------------------------------------------
1 | .. OcelotInterface documentation master file, created by
2 |    sphinx-quickstart on Wed May 11 14:19:02 2016.
3 |    You can adapt this file completely to your liking, but it should at least
4 |    contain the root `toctree` directive.
5 |
6 | Welcome to OcelotInterface's documentation
7 | ==========================================
8 |
9 | **This page documents the structure and various packages used in the OcelotInterface GUI.**
10 |
11 | .. image:: images/ocelot_scan.png
12 |    :width: 600
13 |
14 | *The OcelotInterface GUI after a simplex optimization of the beam pointing at the end of the Injector*
15 |
16 |
17 | Contents
18 | --------
19 | .. toctree::
20 |    :maxdepth: 2
21 |
22 |    intro
23 |    usage
24 |    structure
25 |    status
26 |
27 |
28 | Source Documentation
29 | --------------------
30 | .. toctree::
31 |    :maxdepth: 3
32 |
33 |    modules
34 |
35 | Indices and tables
36 | ------------------
37 |
38 | * :ref:`genindex`
39 | * :ref:`modindex`
40 | * :ref:`search`
41 |
42 |
-------------------------------------------------------------------------------- /docs/_build/html/_sources/intro.rst.txt: --------------------------------------------------------------------------------
1 | Introduction
2 | ============
3 |
4 | This is a deep modification of the SLAC version of the Ocelot GUI (Tyler Cope, SLAC, 2016) for the European XFEL facility.
5 |
6 | Sergey Tomin, 2017.
7 |
8 |
9 | What is OcelotInterface?
10 | ------------------------
11 |
12 | OcelotInterface is a Python- and PyQt-based GUI for running and testing accelerator optimization methods.
13 |
14 | This project stems from work by Ilya Agapov (DESY) on the OCELOT python package, an accelerator simulation and interface framework.
15 | The OCELOT package is primarily used for x-ray simulation, but it contains files for beam optimization, which we have used and expanded upon for accelerator tuning.
16 | The main optimization file Ocelot.util.mint was used to test the scanner on LCLS by writing a machine interface wrapper file. Soon after, a GUI interface was created to facilitate easy testing.
17 |
18 | The goal is twofold:
19 |
20 | * To provide a stable user interface to run optimization software in standard tuning procedures.
21 | * To provide a testing framework for new methods of optimization.
22 |
23 | The software is now used in standard tuning and saves data to the MATLAB physics data directory for analysis.
24 |
25 |
26 | What can it do?
27 | ---------------
28 |
29 | Currently the production GUI is used to run optimization scans using the Nelder-Mead simplex algorithm, to optimize the machine FEL or some other parameter.
30 | The interface provides a method to select multiple tuning devices for a scan, and to quickly reset devices in the event of a problem.
31 | The options panel tab allows a user to change settings for the scan.
32 |
33 | The software is also in development to use a new Bayesian optimization method called the Gaussian Process.
34 | The GP scanner uses the algorithm detailed in the papers listed in the reference section below.
35 | So far initial tests have been conducted using the GP scanner, but it is not yet ready for production use.
36 | 37 | 38 | Resources 39 | --------- 40 | 41 | **OCELOT Info** 42 | 43 | * `OCELOT GitHub `_ 44 | * `Simplex algorithm wiki `_ 45 | * `IPAC16 Automated Tuning: `_ 46 | 47 | **Bayesian Optimization** 48 | 49 | * `Gaussian Processes textbook: `_ 50 | * `Bayesian Optimization Text: `_ 51 | * `IPAC16 Bayesian Optimization: `_ 52 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/modules.rst.txt: -------------------------------------------------------------------------------- 1 | optimizer 2 | ========= 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | optimizer 8 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/optimizer.GP.rst.txt: -------------------------------------------------------------------------------- 1 | optimizer.GP package 2 | ==================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | optimizer.GP.OnlineGP module 8 | ---------------------------- 9 | 10 | .. automodule:: optimizer.GP.OnlineGP 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | optimizer.GP.bayes_optimization module 16 | -------------------------------------- 17 | 18 | .. automodule:: optimizer.GP.bayes_optimization 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | optimizer.GP.gaussian_process module 24 | ------------------------------------ 25 | 26 | .. automodule:: optimizer.GP.gaussian_process 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: optimizer.GP 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/optimizer.mint.rst.txt: -------------------------------------------------------------------------------- 1 | optimizer.mint package 2 | ====================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | optimizer.mint.lcls_interface module 8 | ------------------------------------ 9 | 10 | .. automodule:: optimizer.mint.lcls_interface 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | optimizer.mint.mint module 16 | -------------------------- 17 | 18 | .. automodule:: optimizer.mint.mint 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | optimizer.mint.obj_function module 24 | ---------------------------------- 25 | 26 | .. automodule:: optimizer.mint.obj_function 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | optimizer.mint.opt_objects module 32 | --------------------------------- 33 | 34 | .. automodule:: optimizer.mint.opt_objects 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | optimizer.mint.xfel_interface module 40 | ------------------------------------ 41 | 42 | .. automodule:: optimizer.mint.xfel_interface 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: optimizer.mint 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /docs/_build/html/_sources/optimizer.resetpanel.rst.txt: -------------------------------------------------------------------------------- 1 | optimizer.resetpanel package 2 | ============================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | optimizer.resetpanel.UIresetpanel module 8 | ---------------------------------------- 9 | 10 | .. 
automodule:: optimizer.resetpanel.UIresetpanel
11 |     :members:
12 |     :undoc-members:
13 |     :show-inheritance:
14 |
15 | optimizer.resetpanel.resetpanel module
16 | --------------------------------------
17 |
18 | .. automodule:: optimizer.resetpanel.resetpanel
19 |     :members:
20 |     :undoc-members:
21 |     :show-inheritance:
22 |
23 | optimizer.resetpanel.resetpanelbox module
24 | -----------------------------------------
25 |
26 | .. automodule:: optimizer.resetpanel.resetpanelbox
27 |     :members:
28 |     :undoc-members:
29 |     :show-inheritance:
30 |
31 |
32 | Module contents
33 | ---------------
34 |
35 | .. automodule:: optimizer.resetpanel
36 |     :members:
37 |     :undoc-members:
38 |     :show-inheritance:
39 |
-------------------------------------------------------------------------------- /docs/_build/html/_sources/optimizer.rst.txt: --------------------------------------------------------------------------------
1 | optimizer package
2 | =================
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 |
9 |     optimizer.GP
10 |     optimizer.mint
11 |     optimizer.resetpanel
12 |
13 | Submodules
14 | ----------
15 |
16 | optimizer.UIOcelotInterface_gen module
17 | --------------------------------------
18 |
19 | .. automodule:: optimizer.UIOcelotInterface_gen
20 |     :members:
21 |     :undoc-members:
22 |     :show-inheritance:
23 |
24 | optimizer.generic_optim module
25 | ------------------------------
26 |
27 | .. automodule:: optimizer.generic_optim
28 |     :members:
29 |     :undoc-members:
30 |     :show-inheritance:
31 |
32 | optimizer.gui_main module
33 | -------------------------
34 |
35 | .. automodule:: optimizer.gui_main
36 |     :members:
37 |     :undoc-members:
38 |     :show-inheritance:
39 |
40 |
41 | Module contents
42 | ---------------
43 |
44 | .. automodule:: optimizer
45 |     :members:
46 |     :undoc-members:
47 |     :show-inheritance:
48 |
-------------------------------------------------------------------------------- /docs/_build/html/_sources/status.rst.txt: --------------------------------------------------------------------------------
1 | Project Status
2 | ==============
3 |
4 | Currently OcelotInterface is in production using the simplex algorithm, non-normalized.
5 |
6 | The normalization of the simplex parameter code is ready, but a good solution for calculating parameters based on energy is not.
7 | For now the normalization checkbox remains disabled. This could be enabled, but will probably stay this way for now.
8 |
9 | Versions for the GP scanner are ready and have been run, but are not enabled in production.
10 | 11 | 12 | To Do List 13 | ---------- 14 | 15 | Edited 5/19/16 16 | TMC 17 | 18 | * Finish documentation 19 | * Write method/class to detect klystron trips 20 | * Write method to save GP model data at every step of GP scan 21 | * Write method for scripting optimizer runs 22 | * Lots of testing of the GP scanner 23 | 24 | Wiki Notes 25 | ---------- 26 | 27 | Probably wont be updated too often 28 | 29 | `Wiki page with task list and development notes `_ 30 | -------------------------------------------------------------------------------- /docs/_build/html/_static/ajax-loader.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_static/ajax-loader.gif -------------------------------------------------------------------------------- /docs/_build/html/_static/classic.css: -------------------------------------------------------------------------------- 1 | /* 2 | * classic.css_t 3 | * ~~~~~~~~~~~~~ 4 | * 5 | * Sphinx stylesheet -- classic theme. 6 | * 7 | * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | 12 | @import url("basic.css"); 13 | 14 | /* -- page layout ----------------------------------------------------------- */ 15 | 16 | body { 17 | font-family: sans-serif; 18 | font-size: 100%; 19 | background-color: #11303d; 20 | color: #000; 21 | margin: 0; 22 | padding: 0; 23 | } 24 | 25 | div.document { 26 | background-color: #1c4e63; 27 | } 28 | 29 | div.documentwrapper { 30 | float: left; 31 | width: 100%; 32 | } 33 | 34 | div.bodywrapper { 35 | margin: 0 0 0 230px; 36 | } 37 | 38 | div.body { 39 | background-color: #ffffff; 40 | color: #000000; 41 | padding: 0 20px 30px 20px; 42 | } 43 | 44 | div.footer { 45 | color: #ffffff; 46 | width: 100%; 47 | padding: 9px 0 9px 0; 48 | text-align: center; 49 | font-size: 75%; 50 | } 51 | 52 | div.footer a { 53 | color: #ffffff; 54 | text-decoration: underline; 55 | } 56 | 57 | div.related { 58 | background-color: #133f52; 59 | line-height: 30px; 60 | color: #ffffff; 61 | } 62 | 63 | div.related a { 64 | color: #ffffff; 65 | } 66 | 67 | div.sphinxsidebar { 68 | } 69 | 70 | div.sphinxsidebar h3 { 71 | font-family: 'Trebuchet MS', sans-serif; 72 | color: #ffffff; 73 | font-size: 1.4em; 74 | font-weight: normal; 75 | margin: 0; 76 | padding: 0; 77 | } 78 | 79 | div.sphinxsidebar h3 a { 80 | color: #ffffff; 81 | } 82 | 83 | div.sphinxsidebar h4 { 84 | font-family: 'Trebuchet MS', sans-serif; 85 | color: #ffffff; 86 | font-size: 1.3em; 87 | font-weight: normal; 88 | margin: 5px 0 0 0; 89 | padding: 0; 90 | } 91 | 92 | div.sphinxsidebar p { 93 | color: #ffffff; 94 | } 95 | 96 | div.sphinxsidebar p.topless { 97 | margin: 5px 10px 10px 10px; 98 | } 99 | 100 | div.sphinxsidebar ul { 101 | margin: 10px; 102 | padding: 0; 103 | color: #ffffff; 104 | } 105 | 106 | div.sphinxsidebar a { 107 | color: #98dbcc; 108 | } 109 | 110 | div.sphinxsidebar input { 111 | border: 1px solid #98dbcc; 112 | font-family: sans-serif; 113 | font-size: 1em; 114 | } 115 | 116 | 117 | 118 | /* -- hyperlink styles ------------------------------------------------------ */ 119 | 120 | a { 121 | color: #355f7c; 122 | text-decoration: none; 123 | } 124 | 125 | a:visited { 126 | color: #355f7c; 127 | text-decoration: none; 128 | } 129 | 130 | a:hover { 131 | text-decoration: underline; 132 | } 133 | 134 | 135 | 136 | /* -- body styles 
----------------------------------------------------------- */ 137 | 138 | div.body h1, 139 | div.body h2, 140 | div.body h3, 141 | div.body h4, 142 | div.body h5, 143 | div.body h6 { 144 | font-family: 'Trebuchet MS', sans-serif; 145 | background-color: #f2f2f2; 146 | font-weight: normal; 147 | color: #20435c; 148 | border-bottom: 1px solid #ccc; 149 | margin: 20px -20px 10px -20px; 150 | padding: 3px 0 3px 10px; 151 | } 152 | 153 | div.body h1 { margin-top: 0; font-size: 200%; } 154 | div.body h2 { font-size: 160%; } 155 | div.body h3 { font-size: 140%; } 156 | div.body h4 { font-size: 120%; } 157 | div.body h5 { font-size: 110%; } 158 | div.body h6 { font-size: 100%; } 159 | 160 | a.headerlink { 161 | color: #c60f0f; 162 | font-size: 0.8em; 163 | padding: 0 4px 0 4px; 164 | text-decoration: none; 165 | } 166 | 167 | a.headerlink:hover { 168 | background-color: #c60f0f; 169 | color: white; 170 | } 171 | 172 | div.body p, div.body dd, div.body li, div.body blockquote { 173 | text-align: justify; 174 | line-height: 130%; 175 | } 176 | 177 | div.admonition p.admonition-title + p { 178 | display: inline; 179 | } 180 | 181 | div.admonition p { 182 | margin-bottom: 5px; 183 | } 184 | 185 | div.admonition pre { 186 | margin-bottom: 5px; 187 | } 188 | 189 | div.admonition ul, div.admonition ol { 190 | margin-bottom: 5px; 191 | } 192 | 193 | div.note { 194 | background-color: #eee; 195 | border: 1px solid #ccc; 196 | } 197 | 198 | div.seealso { 199 | background-color: #ffc; 200 | border: 1px solid #ff6; 201 | } 202 | 203 | div.topic { 204 | background-color: #eee; 205 | } 206 | 207 | div.warning { 208 | background-color: #ffe4e4; 209 | border: 1px solid #f66; 210 | } 211 | 212 | p.admonition-title { 213 | display: inline; 214 | } 215 | 216 | p.admonition-title:after { 217 | content: ":"; 218 | } 219 | 220 | pre { 221 | padding: 5px; 222 | background-color: #eeffcc; 223 | color: #333333; 224 | line-height: 120%; 225 | border: 1px solid #ac9; 226 | border-left: none; 227 | border-right: none; 228 | } 229 | 230 | code { 231 | background-color: #ecf0f3; 232 | padding: 0 1px 0 1px; 233 | font-size: 0.95em; 234 | } 235 | 236 | th { 237 | background-color: #ede; 238 | } 239 | 240 | .warning code { 241 | background: #efc2c2; 242 | } 243 | 244 | .note code { 245 | background: #d6d6d6; 246 | } 247 | 248 | .viewcode-back { 249 | font-family: sans-serif; 250 | } 251 | 252 | div.viewcode-block:target { 253 | background-color: #f4debf; 254 | border-top: 1px solid #ac9; 255 | border-bottom: 1px solid #ac9; 256 | } 257 | 258 | div.code-block-caption { 259 | color: #efefef; 260 | background-color: #1c4e63; 261 | } -------------------------------------------------------------------------------- /docs/_build/html/_static/comment-bright.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_static/comment-bright.png -------------------------------------------------------------------------------- /docs/_build/html/_static/comment-close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_static/comment-close.png -------------------------------------------------------------------------------- /docs/_build/html/_static/comment.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_static/comment.png -------------------------------------------------------------------------------- /docs/_build/html/_static/default.css: -------------------------------------------------------------------------------- 1 | @import url("classic.css"); 2 | -------------------------------------------------------------------------------- /docs/_build/html/_static/down-pressed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_static/down-pressed.png -------------------------------------------------------------------------------- /docs/_build/html/_static/down.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_static/down.png -------------------------------------------------------------------------------- /docs/_build/html/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_static/file.png -------------------------------------------------------------------------------- /docs/_build/html/_static/minus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_static/minus.png -------------------------------------------------------------------------------- /docs/_build/html/_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_static/plus.png -------------------------------------------------------------------------------- /docs/_build/html/_static/pygments.css: -------------------------------------------------------------------------------- 1 | .highlight .hll { background-color: #ffffcc } 2 | .highlight { background: #eeffcc; } 3 | .highlight .c { color: #408090; font-style: italic } /* Comment */ 4 | .highlight .err { border: 1px solid #FF0000 } /* Error */ 5 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */ 6 | .highlight .o { color: #666666 } /* Operator */ 7 | .highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */ 8 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ 9 | .highlight .cp { color: #007020 } /* Comment.Preproc */ 10 | .highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */ 11 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ 12 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ 13 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 14 | .highlight .ge { font-style: italic } /* Generic.Emph */ 15 | .highlight .gr { color: #FF0000 } /* Generic.Error */ 16 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 17 | .highlight .gi { color: #00A000 } /* Generic.Inserted */ 18 | .highlight .go { color: #333333 } /* Generic.Output */ 19 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ 20 | .highlight .gs { 
font-weight: bold } /* Generic.Strong */ 21 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ 22 | .highlight .gt { color: #0044DD } /* Generic.Traceback */ 23 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ 24 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ 25 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ 26 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */ 27 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ 28 | .highlight .kt { color: #902000 } /* Keyword.Type */ 29 | .highlight .m { color: #208050 } /* Literal.Number */ 30 | .highlight .s { color: #4070a0 } /* Literal.String */ 31 | .highlight .na { color: #4070a0 } /* Name.Attribute */ 32 | .highlight .nb { color: #007020 } /* Name.Builtin */ 33 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ 34 | .highlight .no { color: #60add5 } /* Name.Constant */ 35 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ 36 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ 37 | .highlight .ne { color: #007020 } /* Name.Exception */ 38 | .highlight .nf { color: #06287e } /* Name.Function */ 39 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ 40 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ 41 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ 42 | .highlight .nv { color: #bb60d5 } /* Name.Variable */ 43 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ 44 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */ 45 | .highlight .mb { color: #208050 } /* Literal.Number.Bin */ 46 | .highlight .mf { color: #208050 } /* Literal.Number.Float */ 47 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */ 48 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */ 49 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */ 50 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ 51 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */ 52 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ 53 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */ 54 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ 55 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ 56 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ 57 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */ 58 | .highlight .sr { color: #235388 } /* Literal.String.Regex */ 59 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */ 60 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */ 61 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ 62 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ 63 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ 64 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ 65 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /docs/_build/html/_static/up-pressed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_static/up-pressed.png 
-------------------------------------------------------------------------------- /docs/_build/html/_static/up.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/_static/up.png -------------------------------------------------------------------------------- /docs/_build/html/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/_build/html/objects.inv -------------------------------------------------------------------------------- /docs/_build/html/search.html: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | Search — OcelotOptimizer 1.0 documentation 10 | 11 | 12 | 13 | 14 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 51 | 52 |
Search
Please activate JavaScript to enable the search functionality.
From here you can search these documents. Enter your search words into the box below and click "search". Note that the search function will automatically search for all of the words. Pages containing fewer words won't appear in the result list.
90 | 102 | 106 | 107 | -------------------------------------------------------------------------------- /docs/help.txt: -------------------------------------------------------------------------------- 1 | sphinx-apidoc -o c:\Users\tomins\Documents\Dropbox\DESY\repository\ocelot\optimizer\docs\sour 2 | ce c:\Users\tomins\Documents\Dropbox\DESY\repository\ocelot\optimizer\ 3 | 4 | make.bat html 5 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=.\source 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=OcelotOptimizer 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 20 | echo.installed, then set the SPHINXBUILD environment variable to point 21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 22 | echo.may add the Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/readme/ocelot_anim.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/readme/ocelot_anim.gif -------------------------------------------------------------------------------- /docs/readme/ocelot_schematic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/readme/ocelot_schematic.png -------------------------------------------------------------------------------- /docs/source/images/ocelot_dev_panel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/source/images/ocelot_dev_panel.jpg -------------------------------------------------------------------------------- /docs/source/images/ocelot_options.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/source/images/ocelot_options.png -------------------------------------------------------------------------------- /docs/source/images/ocelot_plots.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/source/images/ocelot_plots.png -------------------------------------------------------------------------------- /docs/source/images/ocelot_resetpanel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/source/images/ocelot_resetpanel.png 
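The docs/help.txt and docs/make.bat entries above show how this documentation tree is regenerated: sphinx-apidoc first writes the API stubs into docs/source, then "make.bat html" runs sphinx-build. As a rough, hypothetical sketch only (assuming Sphinx >= 1.7 is installed and the working directory is the repository root), the same two steps could be driven from Python:

# Hypothetical helper, not part of the repository: regenerate API stubs and rebuild the HTML docs.
from sphinx.ext.apidoc import main as apidoc_main   # assumes Sphinx >= 1.7
from sphinx.cmd.build import build_main

apidoc_main(["-o", "docs/source", "."])                        # same role as the sphinx-apidoc call in docs/help.txt
build_main(["-b", "html", "docs/source", "docs/_build/html"])  # same role as "make.bat html"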
-------------------------------------------------------------------------------- /docs/source/images/ocelot_savefile.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/source/images/ocelot_savefile.png -------------------------------------------------------------------------------- /docs/source/images/ocelot_scan.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/source/images/ocelot_scan.png -------------------------------------------------------------------------------- /docs/source/images/ocelot_tab2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/source/images/ocelot_tab2.png -------------------------------------------------------------------------------- /docs/source/images/ocelot_tab3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/docs/source/images/ocelot_tab3.png -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. OcelotInterface documentation master file, created by 2 | sphinx-quickstart on Wed May 11 14:19:02 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to OcelotInterface's documentation 7 | ========================================== 8 | 9 | **This page documents the structure and various packages used in the OcelotInterface GUI.** 10 | 11 | .. image:: images/ocelot_scan.png 12 | :width: 600 13 | 14 | *The OcelotInterface GUI after a simplex optimization of the beam pointing at the end of the Injector* 15 | 16 | 17 | Contents 18 | -------- 19 | .. toctree:: 20 | :maxdepth: 2 21 | 22 | intro 23 | usage 24 | structure 25 | status 26 | 27 | 28 | Source Documentation 29 | -------------------- 30 | .. toctree:: 31 | :maxdepth: 3 32 | 33 | modules 34 | 35 | Indices and tables 36 | ------------------ 37 | 38 | * :ref:`genindex` 39 | * :ref:`modindex` 40 | * :ref:`search` 41 | 42 | -------------------------------------------------------------------------------- /docs/source/intro.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============ 3 | 4 | This is a deep modification of the SLAC version of the Ocelot GUI (Tyler Cope, SLAC, 2016) for the European XFEL facility. 5 | 6 | Sergey Tomin, 2017. 7 | 8 | 9 | What is OcelotInterface? 10 | ------------------------ 11 | 12 | OcelotInterface is a Python and PyQt based GUI for running and testing accelerator optimization methods. 13 | 14 | This project stems from work by Ilya Agapov (DESY) on the OCELOT python package, an accelerator simulation and interface framework. 15 | The OCELOT package is primarily used for x-ray simulation but it contains files for beam optimization, which we have used and expanded upon for accelerator tuning. 16 | The main optimization file Ocelot.util.mint was used to test the scanner on LCLS by writing a machine interface wrapper file.
Soon after, a GUI interface was created to facilitate easy testing. 17 | 18 | The goal is twofold: 19 | 20 | * To provide a stable user interface to run optimization software in standard tuning procedures. 21 | * To provide a testing framework for new methods of optimization 22 | 23 | The software is now used in standard tuning and saves data to the MATLAB physics data directory for analysis. 24 | 25 | 26 | What can it do? 27 | --------------- 28 | 29 | Currently the production GUI is used to run optimization scans using the Nelder-Mead simplex algorithm, to optimize the machine FEL or some other parameter. 30 | The interface provides a method to select multiple tuning devices for a scan, and to quickly reset devices in the event of a problem. 31 | The options panel tab allows a user to change settings for the scan. 32 | 33 | The software is also in development to use a new Bayesian optimization method called the Gaussian Process. 34 | The GP scanner uses the algorithm detailed in the papers below in the reference section. 35 | So far initial tests have been conducted using the GP scanner, but it is not yet ready for production use. 36 | 37 | 38 | Resources 39 | --------- 40 | 41 | **OCELOT Info** 42 | 43 | * `OCELOT GitHub `_ 44 | * `Simplex algorithm wiki `_ 45 | * `IPAC16 Automated Tuning: `_ 46 | 47 | **Bayesian Optimization** 48 | 49 | * `Gaussian Processes textbook: `_ 50 | * `Bayesian Optimization Text: `_ 51 | * `IPAC16 Bayesian Optimization: `_ 52 | -------------------------------------------------------------------------------- /docs/source/modules.rst: -------------------------------------------------------------------------------- 1 | optimizer 2 | ========= 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | optimizer 8 | -------------------------------------------------------------------------------- /docs/source/optimizer.GP.rst: -------------------------------------------------------------------------------- 1 | optimizer.GP package 2 | ==================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | optimizer.GP.OnlineGP module 8 | ---------------------------- 9 | 10 | .. automodule:: optimizer.GP.OnlineGP 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | optimizer.GP.bayes_optimization module 16 | -------------------------------------- 17 | 18 | .. automodule:: optimizer.GP.bayes_optimization 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | optimizer.GP.gaussian_process module 24 | ------------------------------------ 25 | 26 | .. automodule:: optimizer.GP.gaussian_process 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: optimizer.GP 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /docs/source/optimizer.mint.rst: -------------------------------------------------------------------------------- 1 | optimizer.mint package 2 | ====================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | optimizer.mint.lcls_interface module 8 | ------------------------------------ 9 | 10 | .. automodule:: optimizer.mint.lcls_interface 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | optimizer.mint.mint module 16 | -------------------------- 17 | 18 | .. automodule:: optimizer.mint.mint 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | optimizer.mint.obj_function module 24 | ---------------------------------- 25 | 26 | ..
automodule:: optimizer.mint.obj_function 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | optimizer.mint.opt_objects module 32 | --------------------------------- 33 | 34 | .. automodule:: optimizer.mint.opt_objects 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | optimizer.mint.xfel_interface module 40 | ------------------------------------ 41 | 42 | .. automodule:: optimizer.mint.xfel_interface 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: optimizer.mint 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /docs/source/optimizer.resetpanel.rst: -------------------------------------------------------------------------------- 1 | optimizer.resetpanel package 2 | ============================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | optimizer.resetpanel.UIresetpanel module 8 | ---------------------------------------- 9 | 10 | .. automodule:: optimizer.resetpanel.UIresetpanel 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | optimizer.resetpanel.resetpanel module 16 | -------------------------------------- 17 | 18 | .. automodule:: optimizer.resetpanel.resetpanel 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | optimizer.resetpanel.resetpanelbox module 24 | ----------------------------------------- 25 | 26 | .. automodule:: optimizer.resetpanel.resetpanelbox 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | 32 | Module contents 33 | --------------- 34 | 35 | .. automodule:: optimizer.resetpanel 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | -------------------------------------------------------------------------------- /docs/source/optimizer.rst: -------------------------------------------------------------------------------- 1 | optimizer package 2 | ================= 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | optimizer.GP 10 | optimizer.mint 11 | optimizer.resetpanel 12 | 13 | Submodules 14 | ---------- 15 | 16 | optimizer.UIOcelotInterface_gen module 17 | -------------------------------------- 18 | 19 | .. automodule:: optimizer.UIOcelotInterface_gen 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | 24 | optimizer.generic_optim module 25 | ------------------------------ 26 | 27 | .. automodule:: optimizer.generic_optim 28 | :members: 29 | :undoc-members: 30 | :show-inheritance: 31 | 32 | optimizer.gui_main module 33 | ------------------------- 34 | 35 | .. automodule:: optimizer.gui_main 36 | :members: 37 | :undoc-members: 38 | :show-inheritance: 39 | 40 | 41 | Module contents 42 | --------------- 43 | 44 | .. automodule:: optimizer 45 | :members: 46 | :undoc-members: 47 | :show-inheritance: 48 | -------------------------------------------------------------------------------- /docs/source/status.rst: -------------------------------------------------------------------------------- 1 | Project Status 2 | ============== 3 | 4 | Currently OcelotInterface is in production using the simplex algorithm, non-normalized. 5 | 6 | The normalization of the simplex parameter code is ready, but a good solution for calculating parameters based on energy is not. 7 | For now the normalization checkbox remains disabled. This could be enabled, but will probably stay this way for now.
8 | 9 | Versions for the GP scanner are ready and have been run, but are not enabled in production. 10 | 11 | 12 | To Do List 13 | ---------- 14 | 15 | Edited 5/19/16 16 | TMC 17 | 18 | * Finish documentation 19 | * Write method/class to detect klystron trips 20 | * Write method to save GP model data at every step of GP scan 21 | * Write method for scripting optimizer runs 22 | * Lots of testing of the GP scanner 23 | 24 | Wiki Notes 25 | ---------- 26 | 27 | Probably won't be updated too often 28 | 29 | `Wiki page with task list and development notes `_ 30 | -------------------------------------------------------------------------------- /matrixmodel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/matrixmodel/__init__.py -------------------------------------------------------------------------------- /matrixmodel/archive_stuff.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import urllib2 3 | import json 4 | import numpy as np 5 | import time as steve # had to do this to make datetime and time get along, since I'm too lazy to find the real solution to the problem. 6 | from datetime import * 7 | import pytz # python time zone package 8 | import pandas as pd 9 | 10 | ############# Methods to deal with parsing date strings and converting time to GMT to be used in the archiver ################# 11 | # Determines the year, month, and day from Adam's date string format. Ie.) '2017-05-23T00:00:00' 12 | def find_mon_day(dateString): 13 | year_num = int(dateString[0:4]) 14 | month_num = int(dateString[dateString.find('-')+1:dateString.find('-')+3]) 15 | day_num = int(dateString[dateString.find('-',dateString.find('-')+1)+1:dateString.find('-',dateString.find('-')+1)+3]) 16 | return year_num,month_num,day_num 17 | 18 | # Returns hour, minute, and second data from Adam's date string format 19 | def find_hr_min_sec(dateString): 20 | hr_num = int(dateString[dateString.find('T')+1:dateString.find('T')+3]) 21 | min_num = int(dateString[dateString.find(':')+1:dateString.find(':')+3]) 22 | sec_num = int(dateString[dateString.find(':',dateString.find(':')+1)+1:]) 23 | return hr_num,min_num,sec_num 24 | 25 | # Returns the year, month, day etc. as a tuple to be used in other methods. 26 | def datestr2ints(datestr): 27 | year,month,day = find_mon_day(datestr) 28 | hr,minute,sec = find_hr_min_sec(datestr) 29 | return (year,month,day,hr,minute,sec) 30 | 31 | # Evidently, the dates have to be put into the url as GMT not Pacific time to get the correct data and timestamps. 32 | # This converts the user format into GMT. 33 | # Mostly copied from Nora's code 34 | def userTime2gmt(datestr): 35 | datetuple = datestr2ints(datestr) 36 | timeAsDatetime = datetime(*datetuple) 37 | GmtTimeZone= pytz.timezone('GMT') # Greenwich mean time zone object 38 | LocalTimeZone=pytz.timezone('America/Los_Angeles') #local Pacific time zone object 39 | localDateTime=LocalTimeZone.localize(timeAsDatetime)#create a date object with a localized timezone for reference 40 | shiftedTime=localDateTime.astimezone(GmtTimeZone) 41 | return str(datetime.isoformat(shiftedTime))[:str(datetime.isoformat(shiftedTime)).find('+')] 42 | 43 | 44 | ####################### Methods to convert archive data to JSON format and then into lists ########################### 45 | # Converts urllib data into a np array via json...not really sure what json is, but it works!
Thanks Adam! 46 | def datArrange(response): 47 | data = json.load(response) 48 | realData = data[0]["data"] 49 | seconds=np.array([x.get('secs') for x in realData[:]],dtype =float) 50 | nanosUse = 1e-9*np.array([x.get('nanos') for x in realData[:]], dtype = float) 51 | value = np.array([x.get('val') for x in realData[:]]) 52 | time = np.array([sum(x) for x in zip(seconds, nanosUse)],dtype = float) 53 | return time, value 54 | 55 | # Time start and end has to have format: '--T
::' 56 | # EX.) '2017-01-01T00:00:00' 57 | def pullData(pv, start, end=None): 58 | start = userTime2gmt(start) 59 | if type(end) == type(None): 60 | end = start 61 | else: 62 | end = userTime2gmt(end) 63 | url = "http://lcls-archapp.slac.stanford.edu/retrieval/data/getData.json?pv="+pv+"&from="+start+"Z&to="+end+"Z" 64 | #print url 65 | response = urllib2.urlopen(url) 66 | time, value = datArrange(response) 67 | return time,value 68 | 69 | 70 | #def convert_ts_str_to_userTime(ts_str): 71 | # user_time = ts_str[0:10] + 'T' + ts_str[13:15] + ':' + ts_str[16:18] + ':' + ts_str[19:21] 72 | # return user_time 73 | 74 | 75 | def count_unique(keys): 76 | uniq_keys = np.unique(keys) 77 | bins = uniq_keys.searchsorted(keys) 78 | return uniq_keys, np.bincount(bins) -------------------------------------------------------------------------------- /microwave_timer.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/microwave_timer.wav -------------------------------------------------------------------------------- /mint/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/mint/__init__.py -------------------------------------------------------------------------------- /mint/bessy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/mint/bessy/__init__.py -------------------------------------------------------------------------------- /mint/bessy/bessy_obj_function.py: -------------------------------------------------------------------------------- 1 | import time 2 | import numpy as np 3 | 4 | from mint.opt_objects import Target 5 | import stats.stats as stats 6 | 7 | 8 | class BESSYTarget(Target): 9 | def __init__(self, mi=None, eid='GDET:FEE1:241:ENRCHSTBR'): 10 | """ 11 | :param mi: Machine interface 12 | :param eid: ID 13 | """ 14 | super(BESSYTarget, self).__init__(eid=eid) 15 | 16 | self.mi = mi 17 | self.kill = False 18 | self.objective_acquisition = None 19 | self.objective_mean = None 20 | self.objective_stdev = None 21 | 22 | self.objective_acquisitions = [] # all the points 23 | self.objective_means = [] 24 | self.std_dev = [] 25 | self.charge = [] 26 | self.current = [] 27 | self.losses = [] 28 | self.points = None 29 | 30 | def get_penalty(self): 31 | sase = self.get_value() 32 | alarm = self.get_alarm() 33 | pen = 0.0 34 | if alarm > 1.0: 35 | return self.pen_max 36 | if alarm > 0.7: 37 | return alarm * 50.0 38 | pen += alarm 39 | pen -= sase 40 | self.penalties.append(pen) 41 | self.times.append(time.time()) 42 | self.values.append(sase) # statistic 43 | self.objective_acquisitions.append(self.objective_acquisition) # array of points 44 | self.objective_means.append(self.objective_mean) 45 | self.std_dev.append(std) 46 | self.alarms.append(alarm) 47 | self.charge.append(charge) 48 | self.current.append(current) 49 | self.losses.append(losses) 50 | self.niter += 1 51 | return pen 52 | 53 | def get_value(self): 54 | """ 55 | Returns data for the ojective function (sase) from the selected detector PV. 56 | 57 | At lcls the repetition is 120Hz and the readout buf size is 2800. 58 | The last 120 entries correspond to pulse energies over past 1 second. 
59 | 60 | Returns: 61 | Float of SASE or other detecor measurement 62 | """ 63 | datain = self.mi.get_value(self.eid) 64 | if self.points is None: 65 | self.points = 120 66 | print("Get Value of : ", self.points, " points.") 67 | 68 | if self.stats is None: 69 | self.stats = stats.StatNone 70 | 71 | try: 72 | data = datain[-int(self.points):] 73 | self.objective_acquisition = data 74 | self.objective_mean = np.mean(self.objective_acquisition) 75 | self.objective_stdev = np.std(self.objective_acquisition) 76 | self.statistic = self.stats.compute(data) 77 | except: # if average fails use the scalar input 78 | print("Detector is not a waveform PV, using scalar value") 79 | self.objective_acquisition = datain 80 | self.objective_mean = datain 81 | self.objective_stdev = -1 82 | self.statistic = datain 83 | 84 | 85 | return datain 86 | 87 | def clean(self): 88 | Target.clean(self) 89 | self.objective_acquisitions = [] # all the points 90 | self.objective_means = [] 91 | self.std_dev = [] 92 | self.charge = [] 93 | self.current = [] 94 | self.losses = [] 95 | -------------------------------------------------------------------------------- /mint/demo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/mint/demo/__init__.py -------------------------------------------------------------------------------- /mint/demo/demo_interface.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function 2 | 3 | import os 4 | import random 5 | 6 | from mint.opt_objects import MachineInterface 7 | 8 | 9 | class DemoInterface(MachineInterface): 10 | name = 'DemoInterface' 11 | 12 | def __init__(self, args=None): 13 | super(DemoInterface, self).__init__(args=args) 14 | 15 | # self.config_dir is path to a directory where a default config will be saved (the tool state) 16 | # self.config_dir = "/parameters/" is default path in the parent class MachineInterface 17 | self.config_dir = os.path.join(self.config_dir, "demo") # /parameters/demo 18 | 19 | # self.path2jsondir is a path to a folder where optimization histories will be saved in json format 20 | # by default self.path2jsondir = on the same level that 21 | # the folder will be created automatically 22 | 23 | # flag from LCLSInterface which not allow Optimizer to write to control system 24 | self.read_only = False 25 | 26 | def get_value(self, channel): 27 | print("Called get_value for channel: {}.".format(channel)) 28 | return random.random() 29 | 30 | def set_value(self, channel, val): 31 | print("Called set_value for channel: {}, with value: {}".format(channel, val)) 32 | -------------------------------------------------------------------------------- /mint/flash/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/mint/flash/__init__.py -------------------------------------------------------------------------------- /mint/flash/flash_obj_function.py: -------------------------------------------------------------------------------- 1 | """ 2 | Objective Function 3 | 4 | S.Tomin, 2017 5 | """ 6 | 7 | from mint.opt_objects import Target 8 | import numpy as np 9 | import time 10 | 11 | 12 | class XFELTarget(Target): 13 | """ 14 | Objective function 15 | 16 | :param mi: Machine interface 17 | :param pen_max: 
100, maximum penalty 18 | :param niter: 0, calls number get_penalty() 19 | :param penalties: [], appending penalty 20 | :param times: [], appending the time evolution of get_penalty() 21 | :param nreadings: 1, number of objective function readings 22 | :param interval: 0 (secunds), interval between readings 23 | """ 24 | def __init__(self, mi=None, eid="orbit"): 25 | super(XFELTarget, self).__init__(eid=eid) 26 | self.mi = mi 27 | 28 | def read_bpms(self, bpms, nreadings): 29 | orbits = np.zeros((nreadings, len(bpms))) 30 | for i in range(nreadings): 31 | for j, bpm in enumerate(bpms): 32 | orbits[i, j] = self.mi.get_value(bpm) 33 | time.sleep(0.1) 34 | return np.mean(orbits, axis=0) 35 | 36 | def get_value(self): 37 | """ 38 | Method to get signal of target function (e.g. SASE signal). 39 | 40 | :return: value 41 | XFEL.RF/LLRF.CONTROLLER/CTRL.A1.I1/SP.AMPL 42 | """ 43 | bpms = [ 44 | "XFEL.DIAG/BPM/BPME.2252.SA2/X.ALL", 45 | "XFEL.DIAG/BPM/BPME.2258.SA2/X.ALL", 46 | "XFEL.DIAG/BPM/BPME.2264.SA2/X.ALL", 47 | 48 | ] 49 | 50 | orbit1 = self.read_bpms(bpms=bpms, nreadings=7) 51 | 52 | orbit2 = np.zeros(len(bpms)) # just [0, 0, 0, ... ] 53 | 54 | 55 | target = np.sqrt(np.sum((orbit2 - orbit1)**2)) 56 | return target -------------------------------------------------------------------------------- /mint/hipa/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/mint/hipa/__init__.py -------------------------------------------------------------------------------- /mint/lcls/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/mint/lcls/__init__.py -------------------------------------------------------------------------------- /mint/lcls/lcls_devices.py: -------------------------------------------------------------------------------- 1 | import time 2 | import numpy as np 3 | from ..opt_objects import Device 4 | 5 | 6 | class LCLSDevice(Device): 7 | def __init__(self, eid=None, mi=None): 8 | super(LCLSDevice, self).__init__(eid=eid) 9 | self.mi = mi 10 | self.value_percent = 25.0 11 | self.range_percent = 2.0 12 | 13 | def get_delta(self): 14 | """ 15 | Calculate and return the travel range for this device. 
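        The delta combines a fraction of the device's limit range with a fraction of its current value. For example, with the default percentages above (range_percent=2.0, value_percent=25.0), hypothetical limits of (-10, 10) and a current value of 4.0 give m1 = 0.4, m2 = 1.0 and a returned delta of 0.7 (their mean).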
16 | 17 | :return: (float) Travel Range 18 | """ 19 | ll, hl = self.get_limits() 20 | val = self.get_value() 21 | 22 | # Method 1: % of Range 23 | m1 = np.abs((hl-ll)*self.range_percent/100.0) 24 | 25 | # Method 2: % of Current Value 26 | m2 = np.abs(val*self.value_percent/100.0) 27 | 28 | # Method 3: Mean(M1, M2) 29 | m3 = (m1+m2)/2.0 30 | 31 | if m1 != 0.0 and m2 != 0.0: 32 | return m3 33 | if m1 == 0: 34 | return m2 35 | else: 36 | return m1 37 | 38 | def update_limits_from_pv(self): 39 | ll = self.mi.get_value(self.pv_low) 40 | hl = self.mi.get_value(self.pv_high) 41 | self.default_limits = [ll, hl] 42 | self.low_limit = self.default_limits[0] 43 | self.high_limit = self.default_limits[1] 44 | print("Limits for {} are: {}".format(self.eid, self.default_limits)) 45 | 46 | def get_limits(self): 47 | return self.low_limit, self.high_limit 48 | 49 | def set_low_limit(self, val): 50 | if not hasattr(self, 'pv_low'): 51 | super(LCLSDevice, self).set_low_limit(val) 52 | return 53 | 54 | self.update_limits_from_pv() 55 | if val >= self.high_limit-0.0001: 56 | return 57 | if val >= self.default_limits[0]: 58 | self.low_limit = val 59 | else: 60 | self.low_limit = self.default_limits[0] 61 | 62 | def set_high_limit(self, val): 63 | if not hasattr(self, 'pv_high'): 64 | super(LCLSDevice, self).set_high_limit(val) 65 | return 66 | 67 | self.update_limits_from_pv() 68 | if val <= self.low_limit+0.0001: 69 | return 70 | if val <= self.default_limits[1]: 71 | self.high_limit = val 72 | else: 73 | self.high_limit = self.default_limits[1] 74 | 75 | 76 | class LCLSQuad(LCLSDevice): 77 | def __init__(self, eid=None, mi=None): 78 | super(LCLSQuad, self).__init__(eid=eid, mi=mi) 79 | self._can_edit_limits = True 80 | if eid.endswith(':BACT') or eid.endswith(":BCTRL"): 81 | prefix = eid[:eid.rfind(':')+1] 82 | else: 83 | prefix = eid+":" 84 | self.pv_set = "{}{}".format(prefix, "BCTRL") 85 | self.pv_read = "{}{}".format(prefix, "BACT") 86 | self.pv_low = "{}{}".format(prefix, "BCTRL.DRVL") 87 | self.pv_high = "{}{}".format(prefix, "BCTRL.DRVH") 88 | print("Let's get the limits....") 89 | self.update_limits_from_pv() 90 | 91 | def set_value(self, val): 92 | self.target = val 93 | self.mi.set_value(self.eid, val) 94 | 95 | def get_value(self, save=False): 96 | if self.mi.read_only: 97 | val = self.target 98 | if val is None: 99 | val = self.mi.get_value(self.pv_read) 100 | else: 101 | val = self.mi.get_value(self.pv_read) 102 | if save: 103 | self.values.append(val) 104 | self.times.append(time.time()) 105 | 106 | return val 107 | -------------------------------------------------------------------------------- /mint/lcls/lcls_obj_function.py: -------------------------------------------------------------------------------- 1 | import time 2 | import numpy as np 3 | 4 | from mint.opt_objects import Target 5 | import stats.stats as stats 6 | 7 | 8 | class SLACTarget(Target): 9 | def __init__(self, mi=None, eid='GDET:FEE1:241:ENRCHSTBR'): 10 | """ 11 | :param mi: Machine interface 12 | :param eid: ID 13 | """ 14 | super(SLACTarget, self).__init__(eid=eid) 15 | 16 | self.mi = mi 17 | self.kill = False 18 | self.objective_acquisition = None 19 | self.objective_mean = None 20 | self.objective_stdev = None 21 | 22 | self.objective_acquisitions = [] # all the points 23 | self.objective_means = [] 24 | self.std_dev = [] 25 | self.charge = [] 26 | self.current = [] 27 | self.losses = [] 28 | self.points = None 29 | 30 | def get_penalty(self): 31 | sase, std, charge, current, losses = self.get_value() 32 | alarm = 
self.get_alarm() 33 | pen = 0.0 34 | if alarm > 1.0: 35 | return self.pen_max 36 | if alarm > 0.7: 37 | return alarm * 50.0 38 | pen += alarm 39 | pen -= sase 40 | self.penalties.append(pen) 41 | self.times.append(time.time()) 42 | self.values.append(sase) # statistic 43 | self.objective_acquisitions.append(self.objective_acquisition) # array of points 44 | self.objective_means.append(self.objective_mean) 45 | self.std_dev.append(std) 46 | self.alarms.append(alarm) 47 | self.charge.append(charge) 48 | self.current.append(current) 49 | self.losses.append(losses) 50 | self.niter += 1 51 | return pen 52 | 53 | def get_value(self): 54 | """ 55 | Returns data for the ojective function (sase) from the selected detector PV. 56 | 57 | At lcls the repetition is 120Hz and the readout buf size is 2800. 58 | The last 120 entries correspond to pulse energies over past 1 second. 59 | 60 | Returns: 61 | Float of SASE or other detecor measurement 62 | """ 63 | #datain = [] 64 | #for i in range(self.nreadings): 65 | # datain.extend(self.mi.get_value(self.eid)) 66 | # time.sleep(self.interval) 67 | if self.points is None: 68 | self.points = 120 69 | print("Get Value of : ", self.points, " points.") 70 | 71 | try: 72 | rate = self.mi.get_beamrate() 73 | nap_time = self.points/(rate*1.0) 74 | except Exception as ex: 75 | nap_time = 1 76 | print("Something went wrong with the beam rate calculation. Let's sleep 1 second.") 77 | print("Exception was: ", ex) 78 | 79 | time.sleep(nap_time) 80 | 81 | datain = self.mi.get_value(self.eid) 82 | 83 | if self.stats is None: 84 | self.stats = stats.StatNone 85 | 86 | try: 87 | data = datain[-int(self.points):] 88 | self.objective_acquisition = data 89 | self.objective_mean = np.mean(self.objective_acquisition) 90 | self.objective_stdev = np.std(self.objective_acquisition) 91 | self.statistic = self.stats.compute(data) 92 | except: # if average fails use the scalar input 93 | print("Detector is not a waveform PV, using scalar value") 94 | self.objective_acquisition = datain 95 | self.objective_mean = datain 96 | self.objective_stdev = -1 97 | self.statistic = datain 98 | 99 | print(self.stats.display_name, ' of ', self.objective_acquisition.size, ' points is ', self.statistic, 100 | ' and standard deviation is ', self.objective_stdev) 101 | 102 | charge, current = self.mi.get_charge_current() 103 | losses = self.mi.get_losses() 104 | return self.statistic, self.objective_stdev, charge, current, losses 105 | 106 | def clean(self): 107 | Target.clean(self) 108 | self.objective_acquisitions = [] # all the points 109 | self.objective_means = [] 110 | self.std_dev = [] 111 | self.charge = [] 112 | self.current = [] 113 | self.losses = [] 114 | -------------------------------------------------------------------------------- /mint/linac4/linac4_interface.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | Linac4 machine interface 5 | S.Tomin, I.Agapov 2019 6 | """ 7 | from __future__ import absolute_import, print_function 8 | import sys 9 | import os 10 | import sys 11 | import numpy as np 12 | import subprocess 13 | import base64 14 | from mint.opt_objects import MachineInterface, Device, TestDevice 15 | from collections import OrderedDict 16 | from datetime import datetime 17 | import json 18 | import time 19 | import zmq 20 | import pickle 21 | 22 | 23 | class Linac4MachineInterface(MachineInterface): 24 | #class Linac4MachineInterface(): 25 | """ 26 | Machine Interface for Linac4 27 | need ZMQ 
server to communicate with 28 | """ 29 | name = 'Linac4MachineInterface' 30 | 31 | def __init__(self, args=None): 32 | super(Linac4MachineInterface, self).__init__(args) 33 | 34 | self.logbook_name = "" 35 | 36 | path2root = os.path.abspath(os.path.join(__file__ , "../../../..")) 37 | self.config_dir = os.path.join(path2root, "config_optim") 38 | self.timestamp = time.time() 39 | 40 | self.connect2server() 41 | 42 | def connect2server(self): 43 | self.context = zmq.Context() 44 | print("Conncting to server...") 45 | self.socket = self.context.socket(zmq.REQ) 46 | self.socket.connect("tcp://localhost:5556") 47 | 48 | def get_value(self, channel): 49 | """ 50 | Getter function for XFEL. 51 | 52 | :param channel: (str) String of the devices name used in doocs 53 | :return: Data from pydoocs.read(), variable data type depending on channel 54 | """ 55 | 56 | obj = {"cmd": "get", "name": channel} 57 | b = pickle.dumps(obj) 58 | self.socket.send(b) 59 | b = self.socket.recv() 60 | obj2 = pickle.loads(b) 61 | return obj2["val"] 62 | 63 | def set_value(self, channel, val): 64 | """ 65 | Method to set value to a channel 66 | 67 | :param channel: (str) String of the devices name used in doocs 68 | :param val: value 69 | :return: None 70 | """ 71 | obj = {"cmd": "set", "name": channel, "val": val} 72 | self.socket.send(pickle.dumps(obj)) 73 | b = self.socket.recv() 74 | obj2 = pickle.loads(b) 75 | return 76 | 77 | def get_obj_function_module(self): 78 | from mint.linac4 import linac4_obj_function 79 | return linac4_obj_function 80 | 81 | 82 | # test interface 83 | if __name__ == '__main__': 84 | print('started...') 85 | mi = Linac4MachineInterface() 86 | print('the end...') 87 | 88 | 89 | -------------------------------------------------------------------------------- /mint/linac4/linac4_obj_function.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | Objective Function 5 | 6 | S.Tomin, 2017 7 | """ 8 | 9 | from mint.opt_objects import Target 10 | import numpy as np 11 | import time 12 | 13 | 14 | class XFELTarget(Target): 15 | """ 16 | Objective function 17 | 18 | :param mi: Machine interface 19 | :param pen_max: 100, maximum penalty 20 | :param niter: 0, calls number get_penalty() 21 | :param penalties: [], appending penalty 22 | :param times: [], appending the time evolution of get_penalty() 23 | :param nreadings: 1, number of objective function readings 24 | :param interval: 0 (secunds), interval between readings 25 | """ 26 | def __init__(self, mi=None, eid="orbit"): 27 | super(XFELTarget, self).__init__(eid=eid) 28 | self.mi = mi 29 | 30 | def read_bpms(self, bpms, nreadings): 31 | orbits = np.zeros((nreadings, len(bpms))) 32 | for i in range(nreadings): 33 | for j, bpm in enumerate(bpms): 34 | orbits[i, j] = self.mi.get_value(bpm) 35 | time.sleep(0.1) 36 | return np.mean(orbits, axis=0) 37 | 38 | def get_alarm(self): 39 | alarm = self.mi.get_values("transmission") 40 | return alarm 41 | 42 | def get_value(self): 43 | """ 44 | Method to get signal of target function (e.g. SASE signal). 45 | 46 | :return: value 47 | XFEL.RF/LLRF.CONTROLLER/CTRL.A1.I1/SP.AMPL 48 | """ 49 | bpms = [ 50 | 51 | 52 | ] 53 | 54 | orbit1 = self.read_bpms(bpms=bpms, nreadings=1) 55 | 56 | orbit2 = np.zeros(len(bpms)) # just [0, 0, 0, ... 
] 57 | 58 | 59 | target = np.sqrt(np.sum((orbit2 - orbit1)**2)) 60 | return target 61 | -------------------------------------------------------------------------------- /mint/petra/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/mint/petra/__init__.py -------------------------------------------------------------------------------- /mint/petra/petra_obj_function.py: -------------------------------------------------------------------------------- 1 | """ 2 | Objective Function 3 | 4 | S.Tomin, 2017 5 | """ 6 | from __future__ import absolute_import, print_function 7 | 8 | from mint.opt_objects import Target 9 | import numpy as np 10 | import time 11 | 12 | 13 | class PETRATarget(Target): 14 | """ 15 | Objective function 16 | 17 | :param mi: Machine interface 18 | :param pen_max: 100, maximum penalty 19 | :param niter: 0, calls number get_penalty() 20 | :param penalties: [], appending penalty 21 | :param times: [], appending the time evolution of get_penalty() 22 | :param nreadings: 1, number of objective function readings 23 | :param interval: 0 (secunds), interval between readings 24 | """ 25 | def __init__(self, mi=None, eid="x57**2 + y57**2 + x59**2 + y59"): 26 | super(PETRATarget, self).__init__(eid=eid) 27 | self.mi = mi 28 | self.debug = False 29 | self.kill = False 30 | self.pen_max = 100 31 | self.clean() 32 | self.nreadings = 1 33 | self.interval = 0.0 34 | 35 | def get_alarm(self): 36 | """ 37 | Method to get alarm level (e.g. BLM value). 38 | 39 | alarm level must be normalized: 0 is min, 1 is max 40 | 41 | :return: alarm level 42 | """ 43 | return 0 44 | 45 | def read_bpms(self, bpms, nreadings): 46 | orbits = np.zeros((nreadings, len(bpms))) 47 | for i in range(nreadings): 48 | for j, bpm in enumerate(bpms): 49 | orbits[i, j] = self.mi.get_value(bpm) 50 | time.sleep(0.1) 51 | return np.mean(orbits, axis=0) 52 | 53 | def get_value(self): 54 | """ 55 | Method to get signal of target function (e.g. SASE signal). 
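        In this implementation the "signal" is an orbit response: the listed BPM X readings are averaged before and after the CTRL.A1.I1/SP.AMPL setpoint is lowered by 2 (in the setpoint's own units), the setpoint is then restored, and the negative Euclidean norm of the orbit difference is returned.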
56 | 57 | :return: value 58 | XFEL.RF/LLRF.CONTROLLER/CTRL.A1.I1/SP.AMPL 59 | """ 60 | bpms = ["XFEL.DIAG/BPM/BPMA.59.I1/X.ALL", 61 | "XFEL.DIAG/BPM/BPMA.72.I1/X.ALL", 62 | "XFEL.DIAG/BPM/BPMA.75.I1/X.ALL", 63 | "XFEL.DIAG/BPM/BPMA.77.I1/X.ALL", 64 | "XFEL.DIAG/BPM/BPMA.80.I1/X.ALL", 65 | "XFEL.DIAG/BPM/BPMA.82.I1/X.ALL", 66 | "XFEL.DIAG/BPM/BPMA.85.I1/X.ALL", 67 | "XFEL.DIAG/BPM/BPMA.87.I1/X.ALL", 68 | "XFEL.DIAG/BPM/BPMA.90.I1/X.ALL", 69 | "XFEL.DIAG/BPM/BPMA.92.I1/X.ALL", 70 | "XFEL.DIAG/BPM/BPMF.95.I1/X.ALL", 71 | "XFEL.DIAG/BPM/BPMC.134.L1/X.ALL", 72 | "XFEL.DIAG/BPM/BPMA.117.I1/X.ALL", 73 | "XFEL.DIAG/BPM/BPMC.158.L1/X.ALL", 74 | "XFEL.DIAG/BPM/BPMA.179.B1/X.ALL"] 75 | Vinit = self.mi.get_value("XFEL.RF/LLRF.CONTROLLER/CTRL.A1.I1/SP.AMPL") 76 | 77 | orbit1 = self.read_bpms(bpms=bpms, nreadings=7) 78 | 79 | time.sleep(0.1) 80 | self.mi.set_value("XFEL.RF/LLRF.CONTROLLER/CTRL.A1.I1/SP.AMPL", Vinit - 2) 81 | time.sleep(0.9) 82 | 83 | orbit2 = self.read_bpms(bpms=bpms, nreadings=7) 84 | 85 | self.mi.set_value("XFEL.RF/LLRF.CONTROLLER/CTRL.A1.I1/SP.AMPL", Vinit) 86 | time.sleep(0.9) 87 | 88 | target = -np.sqrt(np.sum((orbit2 - orbit1)**2)) 89 | return target 90 | #return -np.sqrt(a ** 2 + b ** 2 + c**2) 91 | 92 | def get_value_test(self): 93 | """ 94 | For testing 95 | 96 | :return: 97 | """ 98 | values = np.array([dev.get_value() for dev in self.devices]) 99 | value = 2*np.sum(np.exp(-np.power((values - np.ones_like(values)), 2) / 5.)) 100 | value = value * (1. + (np.random.rand(1)[0] - 0.5) * 0.001) 101 | return value 102 | 103 | 104 | def get_penalty(self): 105 | """ 106 | Method to calculate the penalty on the basis of the value and alarm level. 107 | 108 | penalty = -get_value() + alarm() 109 | 110 | 111 | :return: penalty 112 | """ 113 | sase = 0. 114 | for i in range(self.nreadings): 115 | sase += self.get_value() 116 | time.sleep(self.interval) 117 | sase = sase/self.nreadings 118 | print("SASE", sase) 119 | alarm = self.get_alarm() 120 | if self.debug: print('alarm:', alarm) 121 | if self.debug: print('sase:', sase) 122 | pen = 0.0 123 | if alarm > 1.0: 124 | return self.pen_max 125 | if alarm > 0.7: 126 | return alarm * self.pen_max / 2. 
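        # Below both alarm thresholds the penalty reduces to alarm - sase,
        # so a smaller penalty corresponds to a larger averaged objective reading.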
127 | pen += alarm 128 | pen -= sase 129 | if self.debug: print('penalty:', pen) 130 | self.niter += 1 131 | # print("niter = ", self.niter) 132 | self.penalties.append(pen) 133 | self.times.append(time.time()) 134 | self.values.append(sase) 135 | self.alarms.append(alarm) 136 | return pen 137 | 138 | def get_spectrum(self): 139 | return [0, 0] 140 | 141 | def get_stat_params(self): 142 | # spetrum = self.get_spectrum() 143 | # ave = np.mean(spetrum[(2599 - 5 * 120):-1]) 144 | # std = np.std(spetrum[(2599 - 5 * 120):-1]) 145 | ave = self.get_value() 146 | std = 0.1 147 | return ave, std 148 | 149 | def get_energy(self): 150 | return 3 151 | 152 | def clean(self): 153 | self.niter = 0 154 | self.penalties = [] 155 | self.times = [] 156 | self.alarms = [] 157 | self.values = [] 158 | -------------------------------------------------------------------------------- /mint/sls/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/mint/sls/__init__.py -------------------------------------------------------------------------------- /mint/sls/sls_interface.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function 2 | import os 3 | import random 4 | 5 | try: 6 | import epics 7 | except: 8 | pass # Show message on Constructor if we try to use it. 9 | 10 | from mint.opt_objects import MachineInterface 11 | from PyQt5.QtWidgets import QWidget 12 | 13 | 14 | class SLSInterface(MachineInterface): 15 | name = 'SLSInterface' 16 | 17 | def __init__(self, args=None): 18 | super(SLSInterface, self).__init__(args=args) 19 | self.config_dir = os.path.join(self.config_dir, "SLS") 20 | self.read_only = False 21 | self.pvs = dict() 22 | path2root = os.path.abspath(os.path.join(__file__ , "../../../..")) 23 | self.config_dir = os.path.join(path2root, "config_optim") 24 | 25 | def get_value(self, device_name): 26 | pv = self.pvs.get(device_name, None) 27 | if pv is None: 28 | self.pvs[device_name] = epics.get_pv(device_name) 29 | return self.pvs[device_name].get() 30 | else: 31 | if not pv.connected: 32 | return None 33 | else: 34 | return pv.get() 35 | 36 | def set_value(self, device_name, val): 37 | pv = self.pvs.get(device_name, None) 38 | if pv is None: 39 | self.pvs[device_name] = epics.get_pv(device_name) 40 | return None 41 | else: 42 | if not pv.connected: 43 | return None 44 | else: 45 | return pv.put(val) 46 | 47 | 48 | def get_charge(self): 49 | return self.get_value("ARS07-DPCT-0000:CURR") 50 | 51 | def screenShot(self, gui, filename, filetype="png"): 52 | """ 53 | Takes a screenshot of the whole gui window, saves png and ps images to file 54 | :param filename: (str) Directory string of where to save the file 55 | :param filetype: (str) String of the filetype to save 56 | :return: 57 | """ 58 | 59 | s = str(filename) + "." 
+ str(filetype) 60 | p = QWidget.grab(gui.Form) 61 | p.save(s, 'png') 62 | p = p.scaled(465, 400) 63 | # save again a small image to use for the logbook thumbnail 64 | p.save(str(s[:-4]) + "_sm.png", 'png') 65 | 66 | def logbook(self, gui): 67 | objective_func = gui.Form.objective_func 68 | objective_func_pv = objective_func.eid 69 | message = "" 70 | if len(objective_func.values) > 0: 71 | message = "Gain (" + str(objective_func_pv) + "): " + str(round(objective_func.values[0], 4)) + " -> " + str( 72 | round(objective_func.values[-1], 4)) 73 | message += "\nIterations: " + str(objective_func.niter) + "\n" 74 | message += "Trim delay: " + str(gui.sb_tdelay.value()) +"\n" 75 | message += "Points Requested: " + str(objective_func.points) + "\n" 76 | message += "Normalization Amp Coeff: " + str(gui.sb_scaling_coef.value()) + "\n" 77 | message += "Type of optimization: " + str(gui.cb_select_alg.currentText()) + "\n" 78 | elog = "SLS" 79 | titel = "Ocelot Optimierung" 80 | eintrag = "Info" 81 | autor = "Ocelot" 82 | self.screenShot(gui, "screenshot", filetype="png") 83 | 84 | attachments = ["screenshot.png"] 85 | encoding = 1 86 | cmd = 'G_CS_ELOG_add -l "' + elog + '" ' 87 | cmd = cmd + '-a "Titel=' + titel + '" ' 88 | cmd = cmd + '-a "Eintrag=' + eintrag + '" ' 89 | cmd = cmd + '-a "Autor=' + autor + '" ' 90 | for attachment in attachments: 91 | cmd = cmd + '-f "' + attachment + '" ' 92 | cmd = cmd + '-n ' + str(encoding) 93 | cmd = cmd + ' "' + message + '"' 94 | import subprocess 95 | proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) 96 | (out, err) = proc.communicate() 97 | if (err is not None) and err!="": 98 | raise Exception(err) 99 | success = False 100 | else: 101 | success = True 102 | return success 103 | 104 | def get_preset_settings(self): 105 | presets = { 106 | "Test": [ 107 | {"display": "Playground Gauss", "filename": "test.json"} 108 | ] 109 | # "Aramis 2": [ 110 | # {"display": "3. undulator Ks", "filename": "Aramis_K.json"}, 111 | # {"display": "4. ", "filename": "Aramis_matching.json"}], 112 | # "Athos 1": [ 113 | # {"display": "1. matching quads", "filename": "Athos_matching.json"}, 114 | # {"display": "2. phase shifters", "filename": "Athos_phase_shifters.json"}], 115 | # "Athos 2": [ 116 | # {"display": "3. undulator Ks", "filename": "Athos_K.json"}, 117 | # {"display": "4. ", "filename": "Athos_matching.json"}] 118 | } 119 | return presets 120 | -------------------------------------------------------------------------------- /mint/spear/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/mint/spear/__init__.py -------------------------------------------------------------------------------- /mint/spear/spear_devices.py: -------------------------------------------------------------------------------- 1 | # -*- coding: iso-8859-1 -*- 2 | import time 3 | import numpy as np 4 | from ..opt_objects import Device 5 | 6 | 7 | 8 | class SPEARDevice(Device): 9 | def __init__(self, eid=None, mi=None): 10 | super(SPEARDevice, self).__init__(eid=eid) 11 | self.mi = mi 12 | self.value_percent = 25.0 13 | self.range_percent = 2.0 14 | 15 | def get_delta(self): 16 | """ 17 | Calculate and return the travel range for this device. 
18 | 19 | :return: (float) Travel Range 20 | """ 21 | ll, hl = self.get_limits() 22 | val = self.get_value() 23 | 24 | # Method 1: % of Range 25 | m1 = np.abs((hl-ll)*self.range_percent/100.0) 26 | 27 | # Method 2: % of Current Value 28 | m2 = np.abs(val*self.value_percent/100.0) 29 | 30 | # Method 3: Mean(M1, M2) 31 | m3 = (m1+m2)/2.0 32 | 33 | if m1 != 0.0 and m2 != 0.0: 34 | return m3 35 | if m1 == 0: 36 | return m2 37 | else: 38 | return m1 39 | 40 | 41 | class SPEARMCORDevice(SPEARDevice): 42 | def __init__(self, eid=None, mi=None): 43 | super(SPEARMCORDevice, self).__init__(eid=eid, mi=mi) 44 | #self._can_edit_limits = True 45 | if eid.endswith(':Curr1') or eid.endswith(':CurrSetpt'): 46 | prefix = eid[:eid.rfind(':')+1] 47 | else: 48 | prefix = eid+':' 49 | self.pv_set = '{}{}'.format(prefix, 'CurrSetpt') 50 | self.pv_read = '{}{}'.format(prefix, 'Curr1') 51 | 52 | 53 | def get_delta(self): 54 | """ 55 | Calculate and return the travel range for this device. 56 | 57 | :return: (float) Travel Range 58 | """ 59 | return 30 60 | 61 | def set_value(self, val): 62 | self.target = val 63 | self.mi.set_value(self.pv_set, [val]) 64 | 65 | def get_value(self, save=False): 66 | if self.mi.read_only: 67 | val = self.target 68 | if val is None: 69 | val = self.mi.get_value(self.pv_read) 70 | else: 71 | val = self.mi.get_value(self.pv_read) 72 | if save: 73 | self.values.append(val) 74 | self.times.append(time.time()) 75 | 76 | return val 77 | 78 | -------------------------------------------------------------------------------- /mint/swissfel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/mint/swissfel/__init__.py -------------------------------------------------------------------------------- /mint/swissfel/sf_interface.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function 2 | import os 3 | import random 4 | 5 | try: 6 | import epics 7 | except: 8 | pass # Show message on Constructor if we try to use it. 
9 | 10 | from mint.opt_objects import MachineInterface 11 | from PyQt5.QtWidgets import QWidget 12 | 13 | 14 | class SwissFELInterface(MachineInterface): 15 | name = 'SwissFELInterface' 16 | 17 | def __init__(self, args=None): 18 | super(SwissFELInterface, self).__init__(args=args) 19 | self.config_dir = os.path.join(self.config_dir, "swissfel") 20 | self.read_only = False 21 | self.pvs = dict() 22 | path2root = os.path.abspath(os.path.join(__file__ , "../../../..")) 23 | self.config_dir = os.path.join(path2root, "config_optim") 24 | 25 | def get_value(self, device_name): 26 | pv = self.pvs.get(device_name, None) 27 | if pv is None: 28 | self.pvs[device_name] = epics.get_pv(device_name) 29 | return self.pvs[device_name].get() 30 | else: 31 | if not pv.connected: 32 | return None 33 | else: 34 | return pv.get() 35 | 36 | def set_value(self, device_name, val): 37 | pv = self.pvs.get(device_name, None) 38 | if pv is None: 39 | self.pvs[device_name] = epics.get_pv(device_name) 40 | return None 41 | else: 42 | if not pv.connected: 43 | return None 44 | else: 45 | return pv.put(val) 46 | 47 | 48 | def get_charge(self): 49 | return self.get_value("SINEG01-DBPM340:Q1") 50 | 51 | def screenShot(self, gui, filename, filetype="png"): 52 | """ 53 | Takes a screenshot of the whole gui window, saves png and ps images to file 54 | :param filename: (str) Directory string of where to save the file 55 | :param filetype: (str) String of the filetype to save 56 | :return: 57 | """ 58 | 59 | s = str(filename) + "." + str(filetype) 60 | p = QWidget.grab(gui.Form) 61 | p.save(s, 'png') 62 | p = p.scaled(465, 400) 63 | # save again a small image to use for the logbook thumbnail 64 | p.save(str(s[:-4]) + "_sm.png", 'png') 65 | 66 | def logbook(self, gui): 67 | objective_func = gui.Form.objective_func 68 | objective_func_pv = objective_func.eid 69 | message = "" 70 | if len(objective_func.values) > 0: 71 | message = "Gain (" + str(objective_func_pv) + "): " + str(round(objective_func.values[0], 4)) + " -> " + str( 72 | round(objective_func.values[-1], 4)) 73 | message += "\nIterations: " + str(objective_func.niter) + "\n" 74 | message += "Trim delay: " + str(gui.sb_tdelay.value()) +"\n" 75 | message += "Points Requested: " + str(objective_func.points) + "\n" 76 | message += "Normalization Amp Coeff: " + str(gui.sb_scaling_coef.value()) + "\n" 77 | message += "Type of optimization: " + str(gui.cb_select_alg.currentText()) + "\n" 78 | elog = "SwissFEL commissioning data" 79 | title = "title" 80 | category = "Info" 81 | application = "Ocelot" 82 | self.screenShot(gui, "screenshot", filetype="png") 83 | 84 | attachments = ["screenshot.png"] 85 | encoding = 1 86 | cmd = 'G_CS_ELOG_add -l "' + elog + '" ' 87 | cmd = cmd + '-a "Title=' + title + '" ' 88 | cmd = cmd + '-a "Category=' + category + '" ' 89 | cmd = cmd + '-a "Application=' + application + '" ' 90 | for attachment in attachments: 91 | cmd = cmd + '-f "' + attachment + '" ' 92 | cmd = cmd + '-n ' + str(encoding) 93 | cmd = cmd + ' "' + message + '"' 94 | import subprocess 95 | proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) 96 | (out, err) = proc.communicate() 97 | if (err is not None) and err!="": 98 | raise Exception(err) 99 | success = False 100 | else: 101 | success = True 102 | return success 103 | 104 | def get_preset_settings(self): 105 | presets = { 106 | "Aramis 1": [ 107 | {"display": "1. matching quads", "filename": "Aramis_matching.json"}, 108 | {"display": "2. 
phase shifters", "filename": "Aramis_phase_shifters.json"}], 109 | "Aramis 2": [ 110 | {"display": "3. undulator Ks", "filename": "Aramis_K.json"}, 111 | {"display": "4. ", "filename": "Aramis_matching.json"}], 112 | "Athos 1": [ 113 | {"display": "1. matching quads", "filename": "Athos_matching.json"}, 114 | {"display": "2. phase shifters", "filename": "Athos_phase_shifters.json"}], 115 | "Athos 2": [ 116 | {"display": "3. undulator Ks", "filename": "Athos_K.json"}, 117 | {"display": "4. ", "filename": "Athos_matching.json"}] 118 | } 119 | return presets 120 | -------------------------------------------------------------------------------- /mint/xfel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/mint/xfel/__init__.py -------------------------------------------------------------------------------- /mint/xfel/xfel_obj_function.py: -------------------------------------------------------------------------------- 1 | """ 2 | Objective Function 3 | 4 | S.Tomin, 2017 5 | """ 6 | 7 | from mint.opt_objects import Target 8 | import numpy as np 9 | import time 10 | 11 | 12 | class XFELTarget(Target): 13 | """ 14 | Objective function 15 | 16 | :param mi: Machine interface 17 | :param dp: Device property 18 | :param pen_max: 100, maximum penalty 19 | :param niter: 0, calls number get_penalty() 20 | :param penalties: [], appending penalty 21 | :param times: [], appending the time evolution of get_penalty() 22 | :param nreadings: 1, number of objective function readings 23 | :param interval: 0 (secunds), interval between readings 24 | """ 25 | def __init__(self, mi=None, dp=None, eid="x57**2 + y57**2 + x59**2 + y59"): 26 | super(XFELTarget, self).__init__(eid=eid) 27 | 28 | def get_value(self): 29 | """ 30 | Method to get signal of target function (e.g. SASE signal). 
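        Here the value is -sqrt(x57**2 + y57**2 + x59**2 + y59**2), built from the X and Y readings of BPMA.57.I1 and BPMA.59.I1, so the objective increases as the orbit excursion at these BPMs decreases.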
31 | 32 | :return: value 33 | """ 34 | x57 = self.mi.get_value("XFEL.DIAG/ORBIT/BPMA.57.I1/X.SA1") 35 | y57 = self.mi.get_value("XFEL.DIAG/ORBIT/BPMA.57.I1/Y.SA1") 36 | x59 = self.mi.get_value("XFEL.DIAG/ORBIT/BPMA.59.I1/X.SA1") 37 | y59 = self.mi.get_value("XFEL.DIAG/ORBIT/BPMA.59.I1/Y.SA1") 38 | return -np.sqrt(x57 ** 2 + y57 ** 2 + x59 ** 2 + y59 ** 2) 39 | -------------------------------------------------------------------------------- /ocelot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/ocelot.png -------------------------------------------------------------------------------- /op_methods/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/op_methods/__init__.py -------------------------------------------------------------------------------- /op_methods/cobyla.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | from mint.mint import * 3 | from scipy import optimize 4 | 5 | 6 | class Cobyla(Minimizer): 7 | def __init__(self): 8 | super(Cobyla, self).__init__() 9 | self.xtol = 1e-5 10 | self.dev_steps = None 11 | self.norm_scales = None 12 | self.cons = None 13 | 14 | def calc_scales(self): 15 | """ 16 | calculate scales for normalized simplex 17 | 18 | :return: np.array() - device_delta_limits * norm_coef 19 | """ 20 | 21 | if self.norm_scales is None: 22 | self.norm_scales = [None] * np.size(self.devices) 23 | 24 | for idx, dev in enumerate(self.devices): 25 | if self.norm_scales[idx] is not None: 26 | continue 27 | delta = dev.get_delta() 28 | if delta == 0: 29 | delta = 1 30 | self.norm_scales[idx] = delta 31 | self.norm_scales = np.array(self.norm_scales) 32 | 33 | return self.norm_scales 34 | 35 | def calc_constraints(self): 36 | 37 | def make_lambda(indx, b): 38 | return lambda x: x[indx] + b 39 | 40 | cons = [] 41 | for idx, dev in enumerate(self.devices): 42 | if dev.get_delta() == 0: 43 | continue 44 | high = (dev.high_limit - self.x_init[idx])/self.norm_scales[idx]/self.norm_coef/self.scaling_coef 45 | con = {'type': 'ineq', 'fun': make_lambda(idx, -high)} 46 | cons.append(con) 47 | low = (dev.low_limit - self.x_init[idx]) / self.norm_scales[idx]/self.norm_coef/self.scaling_coef 48 | con = {'type': 'ineq', 'fun': make_lambda(idx, -low)} 49 | cons.append(con) 50 | 51 | return cons 52 | 53 | def unnormalize(self, xnorm): 54 | # 1.0 is used because of the 'rhobeg': 1.0. 
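        # (Added note: normalize() returns an all-zero vector and COBYLA then explores a
        #  trust region of radius rhobeg = 1.0 around it, so a unit step in xnorm corresponds
        #  to roughly scaling_coef * norm_scales * norm_coef in device units around x_init.)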
55 | 56 | delta_x = np.array(xnorm)*self.scaling_coef 57 | delta_x_scaled = delta_x/1.0 * self.norm_scales * self.norm_coef 58 | x = self.x_init + delta_x_scaled 59 | print("xnorm = ", xnorm) 60 | print("norm_scales = ", self.norm_scales ) 61 | print("norm_coef = ", self.norm_coef) 62 | print("scaling_coef = ", self.scaling_coef) 63 | print("delta_x = ", delta_x) 64 | print("X Init: ", self.x_init) 65 | print("X: ", x) 66 | return x 67 | 68 | def normalize(self, x): 69 | xnorm = np.zeros_like(x) 70 | return xnorm 71 | 72 | def preprocess(self): 73 | """ 74 | defining attribute self.dev_steps 75 | 76 | :return: 77 | """ 78 | self.calc_scales() 79 | self.cons = self.calc_constraints() 80 | 81 | def minimize(self, error_func, x): 82 | if self.cons is None: 83 | self.cons = () 84 | 85 | res = optimize.minimize(error_func, x, tol=self.xtol, method='COBYLA', constraints=list(self.cons), 86 | options={'rhobeg': 1.0, 'maxiter': self.max_iter, 'disp': False, 'catol': 0.0002}) 87 | 88 | return res 89 | -------------------------------------------------------------------------------- /op_methods/custom_minimizer.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | from mint.mint import * 3 | 4 | class CustomMinimizer(Minimizer): 5 | def __init__(self): 6 | super(CustomMinimizer, self).__init__() 7 | self.dev_steps = [0.05] 8 | 9 | def minimize(self, error_func, x): 10 | def custmin(fun, x0, args=(), maxfev=None, stepsize=[0.1], 11 | maxiter=self.max_iter, callback=None, **options): 12 | 13 | print("inside ", stepsize) 14 | 15 | if np.size(stepsize) != np.size(x0): 16 | stepsize = np.ones(np.size(x0))*stepsize[0] 17 | print("inside ", stepsize) 18 | bestx = x0 19 | besty = fun(x0) 20 | print("BEST", bestx, besty) 21 | funcalls = 1 22 | niter = 0 23 | improved = True 24 | stop = False 25 | 26 | while improved and not stop and niter < maxiter: 27 | improved = False 28 | niter += 1 29 | for dim in range(np.size(x0)): 30 | for s in [bestx[dim] - stepsize[dim], bestx[dim] + stepsize[dim]]: 31 | print("custom", niter, dim, s) 32 | testx = np.copy(bestx) 33 | testx[dim] = s 34 | testy = fun(testx, *args) 35 | funcalls += 1 36 | if testy < besty: 37 | besty = testy 38 | bestx = testx 39 | improved = True 40 | if callback is not None: 41 | callback(bestx) 42 | if maxfev is not None and funcalls >= maxfev: 43 | stop = True 44 | break 45 | 46 | return OptimizeResult(fun=besty, x=bestx, nit=niter, 47 | nfev=funcalls, success=(niter > 1)) 48 | res = optimize.minimize(error_func, x, method=custmin, options=dict(stepsize=self.dev_steps)) 49 | return res -------------------------------------------------------------------------------- /op_methods/gp_gpy.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | import GPy 3 | from mint.mint import * 4 | from op_methods.simplex import * 5 | from scipy.optimize import * 6 | 7 | class GPgpy(Minimizer): 8 | def __init__(self): 9 | super(GPgpy, self).__init__() 10 | self.xtol = 1e-5 11 | self.dev_steps = None 12 | 13 | def seed_simplex(self): 14 | opt_smx = Optimizer() 15 | opt_smx.maximization = self.maximize 16 | opt_smx.norm_coef = self.norm_coef 17 | opt_smx.timeout = self.seed_timeout 18 | opt_smx.opt_ctrl = self.opt_ctrl 19 | minimizer = Simplex() 20 | minimizer.max_iter = 20 21 | minimizer.dev_steps = self.dev_steps 22 | #print("MAX iter", self.seed_iter) 23 | opt_smx.minimizer = 
minimizer 24 | # opt.debug = True 25 | seq = [Action(func=opt_smx.max_target_func, args=[self.target, self.devices])] 26 | opt_smx.eval(seq) 27 | #print(opt_smx.opt_ctrl.dev_sets) 28 | self.x_obs = np.vstack(opt_smx.opt_ctrl.dev_sets) 29 | self.y_obs = np.array(opt_smx.opt_ctrl.penalty) 30 | self.y_sigma_obs = np.mean(self.target.std_dev) 31 | 32 | def gp_unnormalize(self, xnorm): 33 | ll = np.array([dev.get_limits()[0] for dev in self.devices]) 34 | hl = np.array([dev.get_limits()[1] for dev in self.devices]) 35 | c = (hl + ll)/2 36 | d = hl - ll 37 | x = xnorm * d/2 +c 38 | return x 39 | 40 | def gp_normalize(self, x): 41 | ll = np.array([dev.get_limits()[0] for dev in self.devices]) 42 | hl = np.array([dev.get_limits()[1] for dev in self.devices]) 43 | c = (hl + ll)/2 44 | d = hl - ll 45 | xnorm = 2*(x - c)/d 46 | return xnorm 47 | 48 | 49 | def init_gp(self, X, Y): 50 | ndim = np.shape(X)[1] 51 | self.bounds = Bounds(np.ones(ndim)*-0.99, np.ones(ndim)*0.99) 52 | self.bounds = [(-0.99, 0.99) for dev in self.devices] 53 | self.kernel = GPy.kern.RBF(input_dim=ndim, variance=np.mean(self.target.std_dev)**2, lengthscale=0.5) 54 | self.model = GPy.models.GPRegression(X, Y, self.kernel) 55 | 56 | # optimize and plot 57 | self.model.optimize(messages=True, max_f_eval=1000) 58 | self.model.Gaussian_noise = np.mean(self.target.std_dev) 59 | 60 | 61 | def gp_predict(self, x, model): 62 | ndim = np.shape(self.Xnorm)[1] 63 | x = np.reshape(x, (-1, ndim)) 64 | # print(np.shape(x)) 65 | f, v = model.predict(x) 66 | # print(f, v) 67 | return (f) 68 | 69 | def one_step(self, error_func, x): 70 | x = [dev.get_value() for dev in self.devices] 71 | print("start GP") 72 | 73 | #res = minimize(self.gp_predict, np.array(x), args=(self.model,), bounds=self.bounds, method='L-BFGS-B') 74 | #res = differential_evolution(self.gp_predict, args=(self.model,), bounds=self.bounds) 75 | xnew = fmin(self.gp_predict, np.array(x), args=(self.model,)) 76 | #xnew = res.x 77 | xnew_unnorm = self.gp_unnormalize(xnew) 78 | ynew = error_func(xnew_unnorm) 79 | 80 | self.Xnorm = np.append(self.Xnorm, np.array([xnew, ]), axis=0) 81 | #X = np.array([d.values for d in self.devices]) 82 | #Y = np.array(self.target.penalties).reshape(-1, 1) 83 | self.Y = np.append(self.Y , np.array([ynew,]).reshape(-1, 1), axis=0) 84 | #print("NEW = ", self.Y, self.Xnorm ) 85 | self.model.set_XY(X=self.Xnorm, Y=self.Y ) 86 | self.model.optimize(messages=True, max_f_eval=1000) 87 | self.model.Gaussian_noise = np.mean(self.target.std_dev) 88 | 89 | def minimize(self, error_func, x): 90 | #self.target_func = error_func 91 | 92 | self.seed_simplex() 93 | 94 | if self.opt_ctrl.kill: 95 | return 96 | self.Y = np.array(self.target.penalties[1:]).reshape(-1, 1) 97 | X = np.array([d.values for d in self.devices]).T 98 | self.Xnorm = self.gp_normalize(X) 99 | 100 | self.init_gp(self.Xnorm, self.Y ) 101 | 102 | for i in range(self.max_iter): 103 | if self.opt_ctrl.kill: 104 | return 105 | self.one_step(error_func, x) 106 | 107 | print("finish GP") 108 | return 109 | -------------------------------------------------------------------------------- /op_methods/gp_sklearn.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | from mint.mint import * 3 | from op_methods.simplex import Simplex 4 | import sklearn 5 | sklearn_version = sklearn.__version__ 6 | if sklearn_version >= "0.18": 7 | from GP import gaussian_process as gp_sklearn 8 | 9 | 10 | class 
GaussProcessSKLearn(Minimizer): 11 | def __init__(self): 12 | super(GaussProcessSKLearn, self).__init__() 13 | self.seed_iter = 5 14 | self.seed_timeout = 0.1 15 | 16 | self.target = None 17 | self.devices = [] 18 | 19 | self.x_obs = [] 20 | self.y_obs = [] 21 | #GP parameters 22 | 23 | self.max_iter = 50 24 | self.norm_coef = 0.1 25 | self.kill = False 26 | self.opt_ctrl = None 27 | 28 | def seed_simplex(self): 29 | opt_smx = Optimizer() 30 | opt_smx.normalization = True 31 | opt_smx.maximization = self.maximize 32 | opt_smx.norm_coef = self.norm_coef 33 | opt_smx.timeout = self.seed_timeout 34 | opt_smx.opt_ctrl = self.opt_ctrl 35 | minimizer = Simplex() 36 | minimizer.max_iter = self.seed_iter 37 | opt_smx.minimizer = minimizer 38 | # opt.debug = True 39 | seq = [Action(func=opt_smx.max_target_func, args=[self.target, self.devices])] 40 | opt_smx.eval(seq) 41 | print(opt_smx.opt_ctrl.dev_sets) 42 | self.x_obs = np.vstack(opt_smx.opt_ctrl.dev_sets) 43 | self.y_obs = np.array(opt_smx.opt_ctrl.penalty) 44 | self.y_sigma_obs = np.zeros(len(self.y_obs)) 45 | 46 | def load_seed(self, x_sets, penalty, sigma_pen=None): 47 | 48 | self.x_obs = np.vstack(x_sets) 49 | self.y_obs = np.array(penalty) 50 | if sigma_pen == None: 51 | self.y_sigma_obs = np.zeros(len(self.y_obs)) 52 | else: 53 | self.y_sigma_obs = sigma_pen 54 | 55 | def preprocess(self): 56 | 57 | self.scanner = gp_sklearn.GP() 58 | self.scanner.opt_ctrl = self.opt_ctrl 59 | devs_std = [] 60 | devs_search_area = [] 61 | for dev in self.devices: 62 | lims = dev.get_limits() 63 | devs_std.append((lims[-1] - lims[0])/3.) 64 | x_vec = np.atleast_2d(np.linspace(lims[0], lims[-1], num=50)).T 65 | devs_search_area.append(x_vec) 66 | 67 | self.scanner.x_search = np.hstack(devs_search_area) 68 | self.scanner.x_obs = self.x_obs 69 | self.scanner.y_obs = self.y_obs 70 | self.scanner.y_sigma_obs = self.y_sigma_obs 71 | 72 | self.scanner.ck_const_value = (0.5*np.mean(self.scanner.y_obs))**2 + 0.1 73 | #self.scanner.ck_const_value_bounds = (self.scanner.ck_const_value,self.scanner.ck_const_value) 74 | self.scanner.rbf_length_scale = np.array(devs_std)/2. 
+ 0.01 75 | #self.scanner.rbf_length_scale_bounds = (self.scanner.rbf_length_scale, self.scanner.rbf_length_scale) 76 | self.scanner.max_iter = self.max_iter 77 | 78 | def minimize(self, error_func, x): 79 | #self.target_func = error_func 80 | 81 | self.seed_simplex() 82 | if self.opt_ctrl.kill: 83 | return 84 | self.preprocess() 85 | x = [dev.get_value() for dev in self.devices] 86 | print("start GP") 87 | self.scanner.minimize(error_func, x) 88 | print("finish GP") 89 | return 90 | -------------------------------------------------------------------------------- /op_methods/powell.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | from mint.mint import * 3 | from scipy import optimize 4 | 5 | 6 | class Powell(Minimizer): 7 | def __init__(self): 8 | super(Powell, self).__init__() 9 | self.xtol = 1e-5 10 | self.dev_steps = None 11 | 12 | def minimize(self, error_func, x): 13 | res = optimize.minimize(error_func, x, method='Powell', tol=self.xtol) 14 | return res 15 | -------------------------------------------------------------------------------- /op_methods/simplex.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, absolute_import 2 | from mint.mint import * 3 | from scipy import optimize 4 | from mint import normscales 5 | 6 | class Simplex(Minimizer): 7 | def __init__(self): 8 | super(Simplex, self).__init__() 9 | self.xtol = 1e-5 10 | self.dev_steps = None 11 | 12 | def preprocess(self): 13 | """ 14 | defining attribute self.dev_steps 15 | 16 | :return: 17 | """ 18 | if self.dev_steps is not None: 19 | return 20 | self.dev_steps = [] 21 | for dev in self.devices: 22 | if "istep" not in dev.__dict__: 23 | self.dev_steps = None 24 | return 25 | elif dev.istep is None or dev.istep == 0: 26 | self.dev_steps = None 27 | return 28 | else: 29 | self.dev_steps.append(dev.istep) 30 | 31 | def minimize(self, error_func, x): 32 | #print("start seed", np.count_nonzero(self.dev_steps)) 33 | if self.dev_steps == None or len(self.dev_steps) != len(x): 34 | print("initial simplex is None") 35 | isim = None 36 | elif np.count_nonzero(self.dev_steps) != len(x): 37 | print("There is zero step. Initial simplex is None") 38 | isim = None 39 | else: 40 | #step = np.ones(len(x))*0.05 41 | isim = np.zeros((len(x) + 1, len(x))) 42 | isim[0, :] = x 43 | for i in range(len(x)): 44 | vertex = np.zeros(len(x)) 45 | vertex[i] = self.dev_steps[i] 46 | isim[i + 1, :] = x + vertex 47 | print("ISIM = ", isim) 48 | #res = optimize.minimize(error_func, x, method='Nelder-Mead', tol=self.xtol, 49 | # options = {'disp': False, 'initial_simplex': [0.05, 0.05], 'maxiter': self.max_iter}) 50 | if scipy.__version__ < "0.18": 51 | res = optimize.fmin(error_func, x, maxiter=self.max_iter, maxfun=self.max_iter, xtol=self.xtol) 52 | else: 53 | res = optimize.fmin(error_func, x, maxiter=self.max_iter, maxfun=self.max_iter, xtol=self.xtol, initial_simplex=isim) 54 | 55 | #print("finish seed") 56 | return res 57 | 58 | 59 | class SimplexNorm(Simplex): 60 | def __init__(self): 61 | super(SimplexNorm, self).__init__() 62 | self.xtol = 1e-5 63 | self.dev_steps = None 64 | 65 | def calc_scales(self): 66 | """ 67 | calculate scales for normalized simplex 68 | 69 | :return: np.array() - device_delta_limits * norm_coef 70 | """ 71 | # TODO: normscales.normscales() after last upgrade was broken. 
Fix or delete 72 | #self.norm_scales = normscales.normscales(self.target.mi, self.devices) 73 | 74 | self.norm_scales = None 75 | 76 | if self.norm_scales is None: 77 | self.norm_scales = [None] * np.size(self.devices) 78 | 79 | for idx, dev in enumerate(self.devices): 80 | if self.norm_scales[idx] is not None: 81 | continue 82 | delta = dev.get_delta() 83 | self.norm_scales[idx] = delta 84 | self.norm_scales = np.array(self.norm_scales) 85 | 86 | # Randomize the initial steps of simplex - Talk to Joe if it fails 87 | #if isinstance(self.minimizer, Simplex): 88 | self.norm_scales *= np.sign(np.random.randn(self.norm_scales.size)) 89 | return self.norm_scales 90 | 91 | def unnormalize(self, xnorm): 92 | # 0.00025 is used for Simplex because of the fmin steps. 93 | 94 | delta_x = np.array(xnorm)*self.scaling_coef 95 | delta_x_scaled = delta_x/0.00025*self.norm_scales * self.norm_coef 96 | x = self.x_init + delta_x_scaled 97 | print("norm_scales = ", self.norm_scales ) 98 | print("norm_coef = ", self.norm_coef) 99 | print("scaling_coef = ", self.scaling_coef) 100 | print("delta_x = ", delta_x) 101 | print("X Init: ", self.x_init) 102 | print("X: ", x) 103 | return x 104 | 105 | def normalize(self, x): 106 | xnorm = np.zeros_like(x) 107 | return xnorm 108 | 109 | def preprocess(self): 110 | self.calc_scales() 111 | 112 | 113 | -------------------------------------------------------------------------------- /parameters/__init__.py: -------------------------------------------------------------------------------- 1 | from os import path 2 | path_to_hyps = path.join(path.dirname(path.realpath(__file__)), "hyperparameters.npy") 3 | path_to_hype3 = path.join(path.dirname(path.realpath(__file__)), "hype3.npy") 4 | -------------------------------------------------------------------------------- /parameters/default.json: -------------------------------------------------------------------------------- 1 | {"use_predef": 2, "lims": [[-5.0, 5.0], [-5.0, 5.0]], "maximization": false, "alarm_min": -1.7, "hyper_file": "devmode", "id": ["sim_device_1", "sim_device_2"], "obj_fun": "sim_objective", "alarm_max": 100.0, "statistics": 6, "checked": [2, 2], "alarm_timeout": 3.0, "fun_e": "", "fun_d": "", "fun_c": "", "fun_b": "", "fun_a": "", "use_live_seed": 0, "isim_rel_step": 100.0, "seed_iter": 8, "alarm_dev": "", "data_points": 120, "max_iter": 10, "set_best_sol": 0, "nreadings": 1, "max_pen": 101, "algorithm": "Nelder-Mead Simplex", "use_isim": 2, "interval": 0.0, "timeout": 1.0} -------------------------------------------------------------------------------- /parameters/fit_params.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/parameters/fit_params.pkl -------------------------------------------------------------------------------- /parameters/fit_params_2017-05_to_2018-01.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/parameters/fit_params_2017-05_to_2018-01.pkl -------------------------------------------------------------------------------- /parameters/fit_params_2018-01_to_2018-01.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/parameters/fit_params_2018-01_to_2018-01.pkl 
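The SimplexNorm arithmetic above is easier to see with concrete numbers. The following stand-alone sketch is not part of the repository: the device deltas, norm_coef and scaling_coef values are invented for illustration, and only the division by 0.00025 comes from the code, which relies on scipy's fmin perturbing zero-valued starting coordinates by 0.00025 when no initial simplex is given.

import numpy as np

# Hypothetical values for two devices (illustration only).
x_init = np.array([10.0, -3.0])      # current device settings
norm_scales = np.array([0.5, 2.0])   # per-device deltas from calc_scales(), sign randomization ignored here
norm_coef = 0.05                     # relative step coefficient (assumed)
scaling_coef = 1.0                   # GUI scaling coefficient (assumed)

def unnormalize(xnorm):
    # Same arithmetic as SimplexNorm.unnormalize(): one fmin zero-step (0.00025)
    # on a coordinate moves that device by norm_scales * norm_coef * scaling_coef.
    delta_x = np.array(xnorm) * scaling_coef
    return x_init + delta_x / 0.00025 * norm_scales * norm_coef

print(unnormalize([0.00025, 0.0]))   # first device moves by 0.5 * 0.05 -> [10.025, -3.0]
print(unnormalize([0.0, 0.00025]))   # second device moves by 2.0 * 0.05 -> [10.0, -2.9]

One fmin step therefore moves each device by its own delta times norm_coef, independent of the device's absolute scale, which is the point of the normalization.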
-------------------------------------------------------------------------------- /parameters/fit_params_august.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/parameters/fit_params_august.pkl -------------------------------------------------------------------------------- /parameters/hype3.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/parameters/hype3.npy -------------------------------------------------------------------------------- /parameters/hyperparameters.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/parameters/hyperparameters.npy -------------------------------------------------------------------------------- /parameters/lcls/default.json: -------------------------------------------------------------------------------- 1 | {"use_predef": 2, "lims": [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], "maximization": false, "alarm_min": 0.0, "hyper_file": "/afs/slac.stanford.edu/u/ad/yshtalen/simulator/ocelot/optimizer/parameters/hyperparameters.npy", "id": ["QUAD:LTU1:620:BCTRL", "QUAD:LTU1:640:BCTRL", "QUAD:LTU1:660:BCTRL", "QUAD:LTU1:680:BCTRL", "QUAD:LI26:201:BCTRL", "QUAD:LI26:301:BCTRL", "QUAD:LI26:401:BCTRL"], "obj_fun": "GDET:FEE1:241:ENRCHSTBR", "alarm_max": 0.0, "statistics": 6, "checked": [2, 2, 2, 2, 2, 2, 2], "alarm_timeout": 2.0, "fun_e": "", "fun_d": "", "fun_c": "", "fun_b": "", "fun_a": "", "use_live_seed": 0, "isim_rel_step": 5.0, "seed_iter": 5, "alarm_dev": "", "data_points": 120, "max_iter": 50, "set_best_sol": 2, "nreadings": 1, "max_pen": 100, "algorithm": "Gaussian Process", "use_isim": 0, "interval": 0.0, "timeout": 1.0} -------------------------------------------------------------------------------- /parameters/lcls/lion.pvs: -------------------------------------------------------------------------------- 1 | LION:LTU1:404:IACT 2 | LION:LTU1:405:IACT 3 | LION:LTU1:504:IACT 4 | LION:LTU1:505:IACT 5 | LION:LTU1:584:IACT 6 | LION:LTU1:585:IACT 7 | LION:LTU1:716:IACT 8 | LION:LTU1:717:IACT 9 | LION:LTU1:732:IACT 10 | LION:LTU1:733:IACT 11 | LION:LTU1:752:IACT 12 | LION:LTU1:753:IACT 13 | LION:LTU1:765:IACT 14 | LION:LTU1:766:IACT 15 | LION:EP01:150:VACT 16 | LION:EP01:151:VACT 17 | LION:IN20:650:VACT 18 | LION:IN20:651:VACT 19 | LION:LI19:900:VACT 20 | LION:LI19:901:VACT 21 | LION:LI20:1500:VACT 22 | LION:LI20:1501:VACT 23 | LION:LI20:3120:VACT 24 | LION:LI20:3121:VACT 25 | LION:LI21:162:VACT 26 | LION:LI21:163:VACT 27 | LION:LI24:708:VACT 28 | LION:LI24:709:VACT 29 | PICM:BSY0:23-2:P1_LOSS_1 30 | PICM:BSY0:23-3:P1_LOSS_1 31 | PICM:BSY0:23-4:P1_LOSS_1 32 | PICM:BSY0:23-5:P1_LOSS_1 33 | PICM:BSY0:23-6:P1_LOSS_1 34 | PICM:BSY0:23-7:P1_LOSS_1 35 | PICM:BSY0:23-8:P1_LOSS_1 36 | PICM:BSYA:2061:P1_LOSS_1 37 | PICM:BSYA:2120:P1_LOSS_1 38 | PICM:BSYA:2220:P1_LOSS_1 39 | PICM:BSYA:2321:P1_LOSS_1 40 | PICM:BSYA:2411:P1_LOSS_1 41 | PICM:BSYA:2472:P1_LOSS_1 42 | PICM:BSYA:2473:P1_LOSS_1 43 | PICM:BSYA:2474:P1_LOSS_1 44 | PICM:BSYH:25-1-6:P1_LOSS_1 45 | PICM:BSYH:25-1-7:P1_LOSS_1 46 | PICM:BSYH:25-1-8:P1_LOSS_1 47 | PICM:BSYH:479:P1_LOSS_1 48 | PICM:BSYH:487:P1_LOSS_1 49 | PICM:BSYH:495:P1_LOSS_1 50 | PICM:BSYH:503:P1_LOSS_1 51 | 
PICM:BSYH:836:P1_LOSS_1 52 | PICM:BSYN:54:P1_LOSS_1 53 | PICM:DMP1:380:P1_LOSS_1 54 | PICM:DMP1:381:P1_LOSS_1 55 | PICM:DMP1:698:P1_LOSS_1 56 | PICM:DMP1:699:P1_LOSS_1 57 | PICM:IN20:11-7:P1_LOSS_1 58 | PICM:IN20:11-8:P1_LOSS_1 59 | PICM:IN20:185:P1_LOSS_1 60 | PICM:IN20:232:P1_LOSS_1 61 | PICM:IN20:245:P1_LOSS_1 62 | PICM:IN20:665:P1_LOSS_1 63 | PICM:IN20:775:P1_LOSS_1 64 | PICM:IN20:933:P1_LOSS_1 65 | PICM:LI24:16-3:P1_LOSS_1 66 | PICM:LI24:16-4:P1_LOSS_1 67 | PICM:LI24:16-5:P1_LOSS_1 68 | PICM:LI24:16-6:P1_LOSS_1 69 | PICM:LI24:16-7:P1_LOSS_1 70 | PICM:LI24:16-8:P1_LOSS_1 71 | PICM:LI24:791:P1_LOSS_1 72 | PICM:LI24:811:P1_LOSS_1 73 | PICM:LI28:916:P1_LOSS_1 74 | PICM:LI29:146:P1_LOSS_1 75 | PICM:LI29:446:P1_LOSS_1 76 | PICM:LI29:546:P1_LOSS_1 77 | PICM:LI29:801:P1_LOSS_1 78 | PICM:LI30:22-1-6:P1_LOSS_1 79 | PICM:LI30:22-1-7:P1_LOSS_1 80 | PICM:LI30:22-1-8:P1_LOSS_1 81 | PICM:LTU1:220:P1_LOSS_1 82 | PICM:LTU1:252:P1_LOSS_1 83 | PICM:LTU1:28-6:P1_LOSS_1 84 | PICM:LTU1:28-7:P1_LOSS_1 85 | PICM:LTU1:28-8:P1_LOSS_1 86 | PICM:LTU1:280:P1_LOSS_1 87 | PICM:LTU1:420:P1_LOSS_1 88 | PICM:LTU1:450:P1_LOSS_1 89 | PICM:LTU1:480:P1_LOSS_1 90 | PICM:LTU1:4:P1_LOSS_1 91 | PICM:LTU1:5:P1_LOSS_1 92 | PICM:LTU1:6:P1_LOSS_1 93 | PICM:LTU1:722:P1_LOSS_1 94 | PICM:LTU1:732:P1_LOSS_1 95 | PICM:LTU1:762:P1_LOSS_1 96 | PICM:LTU1:772:P1_LOSS_1 97 | PICM:LTU1:7:P1_LOSS_1 98 | PICM:LTU1:8:P1_LOSS_1 99 | PICM:UND1:31-3:P1_LOSS_1 100 | PICM:UND1:31-4:P1_LOSS_1 101 | PICM:UND1:31-5:P1_LOSS_1 102 | PICM:UND1:31-6:P1_LOSS_1 103 | PICM:UND1:31-7:P1_LOSS_1 104 | PICM:UND1:31-8:P1_LOSS_1 105 | PICM:UND1:34-5:P1_LOSS_1 106 | PICM:UND1:34-6:P1_LOSS_1 107 | PICM:UND1:34-7:P1_LOSS_1 108 | PICM:UND1:34-8:P1_LOSS_1 109 | -------------------------------------------------------------------------------- /parameters/lclsparams.txt: -------------------------------------------------------------------------------- 1 | QUAD:LTU1:620:BCTRL 2 | QUAD:LTU1:640:BCTRL 3 | QUAD:LTU1:660:BCTRL 4 | QUAD:LTU1:680:BCTRL 5 | QUAD:LI26:201:BCTRL 6 | QUAD:LI26:301:BCTRL 7 | QUAD:LI26:401:BCTRL 8 | QUAD:LI26:501:BCTRL 9 | QUAD:LI26:601:BCTRL 10 | QUAD:LI26:701:BCTRL 11 | QUAD:LI26:801:BCTRL 12 | QUAD:LI26:901:BCTRL 13 | QUAD:LI21:221:BCTRL 14 | QUAD:LI21:251:BCTRL 15 | QUAD:LI24:740:BCTRL 16 | QUAD:LI24:860:BCTRL 17 | QUAD:LTU1:440:BCTRL 18 | QUAD:LTU1:460:BCTRL 19 | QUAD:LI21:201:BCTRL 20 | QUAD:LI21:211:BCTRL 21 | QUAD:LI21:271:BCTRL 22 | QUAD:LI21:278:BCTRL 23 | PHYS:ACR0:OCLT:LINAMP 24 | PHYS:ACR0:OCLT:POSTSATSTART 25 | PHYS:ACR0:OCLT:POSTSATAMP 26 | #QUAD:IN20:361:BCTRL 27 | #QUAD:IN20:371:BCTRL 28 | #QUAD:IN20:425:BCTRL 29 | #QUAD:IN20:441:BCTRL 30 | #QUAD:IN20:511:BCTRL 31 | #QUAD:IN20:525:BCTRL 32 | #XCOR:IN20:951:BCTRL 33 | #YCOR:IN20:952:BCTRL 34 | #SIOC:SYS0:ML00:CALCOUT997 35 | #SIOC:SYS0:ML00:CALCOUT998 36 | #SIOC:SYS0:ML00:CALCOUT999 37 | #SIOC:SYS0:ML00:CALCOUT000 38 | #MKB:SYS0:1:VAL 39 | -------------------------------------------------------------------------------- /parameters/multinormal/default.json: -------------------------------------------------------------------------------- 1 | {"id": ["sim_device_1", "sim_device_2", "sim_device_3", "sim_device_4", "sim_device_5", "sim_device_6", "sim_device_7", "sim_device_8", "sim_device_9", "sim_device_10"], "lims": [[-5.0, 5.0], [-5.0, 5.0], [-5.0, 5.0], [-5.0, 5.0], [-5.0, 5.0], [-5.0, 5.0], [-5.0, 5.0], [-5.0, 5.0], [-5.0, 5.0], [-5.0, 5.0]], "checked": [2, 2, 2, 2, 2, 2, 2, 2, 2, 2], "use_predef": 2, "statistics": 6, "data_points": 120, "max_pen": 100, "timeout": 0.1, "nreadings": 5, 
"interval": 0.1, "max_iter": 100, "fun_a": "", "fun_b": "", "fun_c": "", "fun_d": "", "fun_e": "", "obj_fun": "sim_objective", "alarm_dev": "", "alarm_min": 94.0, "alarm_max": 100.0, "alarm_timeout": 2.0, "seed_iter": 10, "use_live_seed": 0, "isim_rel_step": 5.0, "use_isim": 2, "hyper_file": "devmode", "set_best_sol": 0, "algorithm": "Nelder-Mead Simplex", "maximization": true} -------------------------------------------------------------------------------- /parameters/simSeed.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/parameters/simSeed.mat -------------------------------------------------------------------------------- /parameters/spear/default.json: -------------------------------------------------------------------------------- 1 | {"use_predef": 2, "lims": [[-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0], [-30.0, 30.0]], "maximization": true, "alarm_min": 0.0, "hyper_file": "/accphys/matlab/shifts/xiahuang/2019-Run/2019-7-1/ocleot/parameters/hyperparameters.npy", "id": ["01G-QSS4:Curr1", "02G-QSS3:Curr1", "05G-QSS3:Curr1", "07G-QSS2:Curr1", "08G-QSS2:Curr1", "09G-QSS1:Curr1", "10G-QSS4:Curr1", "11G-QSS3:Curr1", "12G-QSS3:Curr1", "14G-QSS2:Curr1", "16G-QSS2:Curr1", "17G-QSS2:Curr1", "18G-QSS1:Curr1"], "obj_fun": "SPEAR:BeamCurrAvg", "alarm_max": 0.0, "statistics": 6, "checked": [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2], "alarm_timeout": 2.0, "fun_e": "", "fun_d": "", "fun_c": "", "fun_b": "", "fun_a": "", "use_live_seed": 0, "isim_rel_step": 5.0, "seed_iter": 5, "alarm_dev": "", "data_points": 1, "max_iter": 50, "set_best_sol": 2, "nreadings": 1, "max_pen": 100, "algorithm": "Gaussian Process", "use_isim": 0, "interval": 0.0, "timeout": 1.0} 2 | -------------------------------------------------------------------------------- /parameters/spear/spear_hyperparams.pkl: -------------------------------------------------------------------------------- 1 | (dp0 2 | S'08G-QSS2:Curr1' 3 | p1 4 | F1.0 5 | sS'02G-QSS3:Curr1' 6 | p2 7 | F1.0 8 | sS'12G-QSS3:Curr1' 9 | p3 10 | F1.0 11 | sS'noise' 12 | p4 13 | F0.0001 14 | sS'01G-QSS4:Curr1' 15 | p5 16 | F1.0 17 | sS'05G-QSS3:Curr1' 18 | p6 19 | F1.0 20 | sS'14G-QSS2:Curr1' 21 | p7 22 | F1.0 23 | sS'17G-QSS2:Curr1' 24 | p8 25 | F1.0 26 | sS'11G-QSS3:Curr1' 27 | p9 28 | F1.0 29 | sS'09G-QSS1:Curr1' 30 | p10 31 | F1.0 32 | sS'07G-QSS2:Curr1' 33 | p11 34 | F1.0 35 | sS'amp' 36 | p12 37 | F0.01 38 | sS'16G-QSS2:Curr1' 39 | p13 40 | F1.0 41 | sS'18G-QSS1:Curr1' 42 | p14 43 | F1.0 44 | sS'10G-QSS4:Curr1' 45 | p15 46 | F1.0 47 | s. -------------------------------------------------------------------------------- /parameters/test.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/parameters/test.db -------------------------------------------------------------------------------- /resetpanel/UIareyousure.ui: -------------------------------------------------------------------------------- 1 | 2 | 3 | Form 4 | 5 | 6 | Qt::ApplicationModal 7 | 8 | 9 | 10 | 0 11 | 0 12 | 514 13 | 112 14 | 15 | 16 | 17 | ResetPanel Dialog - Are you sure? 
18 | 19 | 20 | background-color: white 21 | 22 | 23 | 24 | 5 25 | 26 | 27 | 5 28 | 29 | 30 | 5 31 | 32 | 33 | 5 34 | 35 | 36 | 37 | 38 | 39 | 16 40 | 75 41 | true 42 | 43 | 44 | 45 | background-color: rgb(255, 0,0); 46 | color: white 47 | 48 | 49 | Are you sure you want to implement 50 | changes to all PVs? 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 12 59 | 75 60 | true 61 | 62 | 63 | 64 | color: red 65 | 66 | 67 | Reset 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 12 76 | 75 77 | true 78 | 79 | 80 | 81 | Exit 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | -------------------------------------------------------------------------------- /resetpanel/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/resetpanel/__init__.py -------------------------------------------------------------------------------- /sint/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/sint/__init__.py -------------------------------------------------------------------------------- /sint/multinormal/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/sint/multinormal/__init__.py -------------------------------------------------------------------------------- /sint/multinormal/multinormal_devices.py: -------------------------------------------------------------------------------- 1 | from mint.opt_objects import Device 2 | 3 | 4 | class MultinormalDevice(Device): 5 | def __init__(self, eid=None): 6 | super(MultinormalDevice, self).__init__(eid=eid) 7 | self.value_percent = 25.0 8 | self.range_percent = 2.0 9 | self.default_limits = [-5, 5] 10 | self.low_limit = -5 11 | self.high_limit = 5 12 | 13 | def get_delta(self): 14 | """ 15 | Calculate and return the travel range for this device. 
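        The delta is the mean of two heuristics, 2% of the limit range and 25% of the current value; if either term is zero, the other one is returned on its own.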
16 | 17 | :return: (float) Travel Range 18 | """ 19 | ll, hl = self.get_limits() 20 | val = self.get_value() 21 | 22 | # Method 1: % of Range 23 | m1 = (hl-ll)*self.range_percent/100.0 24 | 25 | # Method 2: % of Current Value 26 | m2 = val*self.value_percent/100.0 27 | 28 | # Method 3: Mean(M1, M2) 29 | m3 = (m1+m2)/2.0 30 | 31 | if m1 != 0.0 and m2 != 0.0: 32 | return m3 33 | if m1 == 0: 34 | return m2 35 | else: 36 | return m1 37 | 38 | def get_limits(self): 39 | return self.low_limit, self.high_limit 40 | 41 | def set_low_limit(self, val): 42 | if val >= self.high_limit-0.0001: 43 | return 44 | if val >= self.default_limits[0]: 45 | self.low_limit = val 46 | else: 47 | self.low_limit = self.default_limits[0] 48 | 49 | def set_high_limit(self, val): 50 | if val <= self.low_limit+0.0001: 51 | return 52 | if val <= self.default_limits[1]: 53 | self.high_limit = val 54 | else: 55 | self.high_limit = self.default_limits[1] 56 | -------------------------------------------------------------------------------- /sint/multinormal/multinormal_obj_function.py: -------------------------------------------------------------------------------- 1 | import time 2 | import numpy as np 3 | from scipy.special import gamma 4 | from scipy.special import erfinv 5 | 6 | from mint.opt_objects import Target 7 | import stats.stats as stats 8 | 9 | 10 | class MultinormalTarget(Target): 11 | def __init__(self, mi=None, eid='sim_objective'): 12 | """ 13 | :param mi: Machine interface 14 | :param eid: ID 15 | """ 16 | super(MultinormalTarget, self).__init__(eid=eid) 17 | 18 | self.mi = mi 19 | self.kill = False 20 | self.objective_acquisition = None 21 | self.objective_mean = None 22 | self.objective_stdev = None 23 | 24 | self.objective_acquisitions = [] # all the points 25 | self.objective_means = [] 26 | self.std_dev = [] 27 | self.charge = [] 28 | self.current = [] 29 | self.losses = [] 30 | self.points = None 31 | self.initialize = True 32 | 33 | 34 | def get_penalty(self): 35 | sase, std, charge, current, losses = self.get_value() 36 | alarm = self.get_alarm() 37 | pen = 0.0 38 | if alarm > 1.0: 39 | return self.pen_max 40 | if alarm > 0.7: 41 | return alarm * 50.0 42 | pen += alarm 43 | pen -= sase 44 | self.penalties.append(pen) 45 | self.times.append(time.time()) 46 | self.values.append(sase) # statistic 47 | self.objective_acquisitions.append( 48 | self.objective_acquisition) # array of points 49 | self.objective_means.append(self.objective_mean) 50 | self.std_dev.append(std) 51 | self.alarms.append(alarm) 52 | self.charge.append(charge) 53 | self.current.append(current) 54 | self.losses.append(losses) 55 | self.niter += 1 56 | return pen 57 | 58 | def get_value(self): 59 | """ 60 | Returns data for the ojective function (sase) from the selected detector PV. 61 | 62 | At lcls the repetition is 120Hz and the readout buf size is 2800. 63 | The last 120 entries correspond to pulse energies over past 1 second. 
64 | 65 | Returns: 66 | Float of SASE or other detecor measurement 67 | """ 68 | if self.points is None: 69 | self.points = 120 70 | self.mi.points = self.points 71 | # print("Get Value of : ", self.points, " points.") 72 | 73 | data = self.mi.f(self.mi.x) 74 | # print("Data (", data.shape, ") : ", data) 75 | 76 | if self.stats is None: 77 | self.stats = stats.StatNone 78 | 79 | self.objective_acquisition = data 80 | self.objective_mean = np.mean(self.objective_acquisition) 81 | self.objective_stdev = np.std(self.objective_acquisition) 82 | self.statistic = self.stats.compute(data) 83 | 84 | print( 85 | self.stats.display_name, ' of ', self.points, 86 | ' points is ', self.statistic, 87 | ' and standard deviation is ', self.objective_stdev) 88 | 89 | charge, current = self.mi.get_charge_current() 90 | losses = self.mi.get_losses() 91 | return self.statistic, self.objective_stdev, charge, current, losses 92 | 93 | def clean(self): 94 | Target.clean(self) 95 | self.objective_acquisitions = [] # all the points 96 | self.objective_means = [] 97 | self.std_dev = [] 98 | self.charge = [] 99 | self.current = [] 100 | self.losses = [] 101 | 102 | def get_energy(self): 103 | return 7 # GeV 104 | -------------------------------------------------------------------------------- /sound_off.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /sound_on.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /stats/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/stats/__init__.py -------------------------------------------------------------------------------- /stats/stats.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class Statistics(object): 5 | """ 6 | Base class for all statistical methods to be applied to data collected. 7 | """ 8 | display_name = "Statistics" 9 | 10 | @staticmethod 11 | def compute(data): 12 | """ 13 | Compute the statistics and returns the computed value. 14 | 15 | :param data: The data to be computed. 16 | """ 17 | raise NotImplementedError 18 | 19 | 20 | class StatNone(Statistics): 21 | """ 22 | Empty Statistic. 23 | """ 24 | display_name = "None" 25 | 26 | @staticmethod 27 | def compute(data): 28 | """ 29 | Don't compute a statistic, just return the data as is. 
30 | 31 | :return: data 32 | """ 33 | return data 34 | 35 | 36 | class StatMedian(Statistics): 37 | display_name = "Median" 38 | 39 | @staticmethod 40 | def compute(data): 41 | return np.median(data) 42 | 43 | 44 | class StatStdDeviation(Statistics): 45 | display_name = "Standard Deviation" 46 | 47 | @staticmethod 48 | def compute(data): 49 | return np.std(data) 50 | 51 | 52 | class StatMedianDeviation(Statistics): 53 | display_name = "Median Deviation" 54 | 55 | @staticmethod 56 | def compute(data): 57 | median = np.median(data) 58 | return np.median(np.abs(data - median)) 59 | 60 | 61 | class StatMax(Statistics): 62 | display_name = "Max" 63 | 64 | @staticmethod 65 | def compute(data): 66 | return np.max(data) 67 | 68 | 69 | class StatMin(Statistics): 70 | display_name = "Min" 71 | 72 | @staticmethod 73 | def compute(data): 74 | return np.min(data) 75 | 76 | 77 | class Stat80Percent(Statistics): 78 | display_name = "80th percentile" 79 | 80 | @staticmethod 81 | def compute(data): 82 | return np.percentile(data, 80) 83 | 84 | 85 | class StatAvgMean(Statistics): 86 | display_name = "Avg. of points > mean" 87 | 88 | @staticmethod 89 | def compute(data): 90 | percentile = np.percentile(data, 50) 91 | return np.mean(data[data > percentile]) 92 | 93 | 94 | class Stat20Percent(Statistics): 95 | display_name = "20th percentile" 96 | 97 | @staticmethod 98 | def compute(data): 99 | return np.percentile(data, 20) 100 | 101 | 102 | class StatMean(Statistics): 103 | display_name = "Mean" 104 | 105 | @staticmethod 106 | def compute(data): 107 | return np.mean(data) 108 | 109 | 110 | all_stats = [StatNone, StatMedian, StatStdDeviation, StatMedianDeviation, StatMax, StatMin, Stat80Percent, 111 | StatAvgMean, Stat20Percent, StatMean] 112 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ocelot-collab/optimizer/4eeec503a1777712891c0e095281749fdc094d12/utils/__init__.py --------------------------------------------------------------------------------
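Finally, a short stand-alone sketch (not part of the repository) of how the statistics registry above is meant to be consumed; the 120-point buffer is an invented stand-in for the acquisition arrays described in sint/multinormal/multinormal_obj_function.py.

import numpy as np
from stats.stats import all_stats

# Invented acquisition buffer: 120 pulse-energy-like readings.
data = np.random.normal(loc=1.0, scale=0.1, size=120)

# Every Statistics subclass exposes a display_name and a static compute(data);
# the objective function selects one of them and applies it to each acquisition.
for stat in all_stats[1:]:   # skip StatNone, which returns the raw buffer unchanged
    print(stat.display_name, "->", stat.compute(data))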