├── cobra ├── VERSION ├── oven │ ├── aliebrahim │ │ ├── __init__.py │ │ ├── designAnalysis.py │ │ ├── keggIO.py │ │ └── simphenyIO.py │ ├── danielhyduke │ │ ├── __init__.py │ │ ├── jython │ │ │ ├── __init__.py │ │ │ ├── scipy │ │ │ │ ├── __init__.py │ │ │ │ ├── README │ │ │ │ └── sparse │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── sputils.py │ │ │ ├── numpy │ │ │ │ ├── core │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── Matrix.py │ │ │ │ ├── README │ │ │ │ └── __init__.py │ │ │ └── README │ │ ├── general │ │ │ ├── __init__.py │ │ │ └── arrays.py │ │ ├── query │ │ │ ├── __init__.py │ │ │ └── query.py │ │ └── construction │ │ │ └── omics_guided.py │ ├── WARNING │ ├── __init__.py │ └── README ├── design │ └── __init__.py ├── test │ ├── data │ │ ├── mini.mat │ │ ├── mini.pickle │ │ ├── iJO1366.pickle │ │ ├── mini_fbc2.xml.bz2 │ │ ├── mini_fbc2.xml.gz │ │ ├── salmonella.pickle │ │ ├── textbook.xml.gz │ │ ├── invalid1.xml │ │ ├── invalid0.xml │ │ ├── update_pickles.py │ │ ├── invalid2.xml │ │ ├── textbook_fva.json │ │ └── salmonella.media │ ├── __init__.py │ └── design.py ├── clean.sh ├── topology │ ├── __init__.py │ └── reporter_metabolites.py ├── manipulation │ ├── __init__.py │ ├── annotate.py │ └── validate.py ├── core │ ├── __init__.py │ ├── Object.py │ ├── Solution.py │ ├── Species.py │ └── Formula.py ├── io │ └── __init__.py ├── flux_analysis │ ├── __init__.py │ ├── loopless.py │ ├── essentiality.py │ ├── parsimonious.py │ ├── variability.py │ ├── moma.py │ └── deletion_worker.py ├── __init__.py ├── solvers │ ├── wrappers.py │ ├── __init__.py │ ├── coin.py │ ├── glpk.pxd │ └── esolver.py └── version.py ├── documentation_builder ├── requirements.txt ├── autodoc.sh ├── cobra.rst ├── cobra.design.rst ├── cobra.topology.rst ├── index.rst ├── cobra.io.rst ├── cobra.manipulation.rst ├── cobra.core.rst ├── cobra.flux_analysis.rst ├── plot_helper.py ├── conf.py ├── make.bat ├── gapfilling.ipynb ├── Makefile └── pymatbridge.ipynb ├── MANIFEST.in ├── manylinux_builder ├── run_cobrapy_builder.sh ├── Dockerfile ├── README.md └── build_cobrapy.sh ├── .coveragerc ├── .travis.yml ├── CONTRIBUTING.md ├── .gitignore ├── appveyor ├── build_glpk.py ├── install.ps1 └── run_with_env.cmd ├── appveyor.yml ├── README.md └── INSTALL.md /cobra/VERSION: -------------------------------------------------------------------------------- 1 | 0.4.2b1 2 | -------------------------------------------------------------------------------- /cobra/oven/aliebrahim/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/jython/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/jython/scipy/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cobra/design/__init__.py: -------------------------------------------------------------------------------- 1 | from .design_algorithms import * 2 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/general/__init__.py: 
-------------------------------------------------------------------------------- 1 | from arrays import * 2 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/query/__init__.py: -------------------------------------------------------------------------------- 1 | from query import * 2 | -------------------------------------------------------------------------------- /documentation_builder/requirements.txt: -------------------------------------------------------------------------------- 1 | nbsphinx>=0.2.4 2 | ipykernel 3 | -------------------------------------------------------------------------------- /cobra/test/data/mini.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Debian/cobrapy/master/cobra/test/data/mini.mat -------------------------------------------------------------------------------- /cobra/clean.sh: -------------------------------------------------------------------------------- 1 | find . -type f -regex '.*pyc' | xargs rm 2 | find . -type f -regex '.*class' | xargs rm 3 | -------------------------------------------------------------------------------- /cobra/test/data/mini.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Debian/cobrapy/master/cobra/test/data/mini.pickle -------------------------------------------------------------------------------- /cobra/test/data/iJO1366.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Debian/cobrapy/master/cobra/test/data/iJO1366.pickle -------------------------------------------------------------------------------- /cobra/test/data/mini_fbc2.xml.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Debian/cobrapy/master/cobra/test/data/mini_fbc2.xml.bz2 -------------------------------------------------------------------------------- /cobra/test/data/mini_fbc2.xml.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Debian/cobrapy/master/cobra/test/data/mini_fbc2.xml.gz -------------------------------------------------------------------------------- /cobra/test/data/salmonella.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Debian/cobrapy/master/cobra/test/data/salmonella.pickle -------------------------------------------------------------------------------- /cobra/test/data/textbook.xml.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Debian/cobrapy/master/cobra/test/data/textbook.xml.gz -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md INSTALL.md LICENSE ez_setup.py 2 | include cobra/solvers/cglpk.pyx cobra/solvers/glpk.pxd 3 | -------------------------------------------------------------------------------- /cobra/oven/WARNING: -------------------------------------------------------------------------------- 1 | THIS DIRECTORY IS SUBJECT TO RANDOM MUTATIONS THAT ARE AESTHETICALLY APPEALING TO YOUR FRIENDLY FASCIST. 
2 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/jython/numpy/core/__init__.py: -------------------------------------------------------------------------------- 1 | from multiarray import * 2 | from core import * 3 | #from numeric import * 4 | -------------------------------------------------------------------------------- /manylinux_builder/run_cobrapy_builder.sh: -------------------------------------------------------------------------------- 1 | docker build -t cobrapy_builder . 2 | docker run --rm -v `pwd`:/io cobrapy_builder /io/build_cobrapy.sh 3 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/jython/README: -------------------------------------------------------------------------------- 1 | Section dedicated to creating scipy/numpy for jython interface using cern.colt. This will not be ready any time soon. 2 | -------------------------------------------------------------------------------- /cobra/oven/__init__.py: -------------------------------------------------------------------------------- 1 | from warnings import warn 2 | warn("Functions in cobra.oven are still being baked thus are not officially supported and may not function") 3 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/jython/numpy/README: -------------------------------------------------------------------------------- 1 | In the future, we will be working on a numpy for java implementation that uses cern.colt matrices as the backend and provides a java interface that mirrors numpy. 2 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/jython/scipy/README: -------------------------------------------------------------------------------- 1 | In the future, we will be working on a scipy for java implementation that uses cern.colt matrices as the backend and provides a java interface that mirrors scipy. 2 | -------------------------------------------------------------------------------- /manylinux_builder/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM quay.io/pypa/manylinux1_x86_64 2 | 3 | ENV GLPK_VER="4.60" 4 | RUN wget http://ftp.gnu.org/gnu/glpk/glpk-${GLPK_VER}.tar.gz -O - | tar xz 5 | WORKDIR glpk-${GLPK_VER} 6 | RUN ./configure && make install 7 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/jython/numpy/__init__.py: -------------------------------------------------------------------------------- 1 | from cern.colt.matrix.impl import DenseDoubleMatrix2D as ndarray 2 | from cern.colt.matrix.impl import SparseDoubleMatrix2D as sdarray 3 | from core import * 4 | #import core 5 | #from core import * 6 | -------------------------------------------------------------------------------- /documentation_builder/autodoc.sh: -------------------------------------------------------------------------------- 1 | rm cobra.rst cobra.*.rst 2 | sphinx-apidoc -o . ../cobra ../cobra/oven ../cobra/external \ 3 | ../cobra/test ../cobra/solvers/ ../cobra/test_all.py \ 4 | ../cobra/version.py ../cobra/solvers/legacy.py 5 | rm modules.rst 6 | -------------------------------------------------------------------------------- /manylinux_builder/README.md: -------------------------------------------------------------------------------- 1 | This script uses docker to build manylinux wheels for cobrapy. 
It only 2 | requires a working docker installation. 3 | 4 | To build manylinux wheels, run ```./run_cobrapy_builder.sh```. The 5 | built wheels will then be placed in the ```wheelhouse``` folder. 6 | -------------------------------------------------------------------------------- /manylinux_builder/build_cobrapy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | for PYBIN in /opt/python/*/bin; do 4 | ${PYBIN}/pip wheel cobra --pre 5 | done 6 | 7 | # Bundle external shared libraries into the wheels 8 | for whl in cobra*.whl; do 9 | auditwheel repair $whl -w /io/wheelhouse/ 10 | done 11 | 12 | -------------------------------------------------------------------------------- /cobra/topology/__init__.py: -------------------------------------------------------------------------------- 1 | from os import name as __name 2 | from sys import modules as __modules 3 | from warnings import warn 4 | if __name == 'java': 5 | warn("%s is not yet supported on jython" % __modules[__name__]) 6 | 7 | else: 8 | from .reporter_metabolites import * 9 | del __name, __modules 10 | -------------------------------------------------------------------------------- /documentation_builder/cobra.rst: -------------------------------------------------------------------------------- 1 | cobra package 2 | ============= 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | cobra.core 10 | cobra.design 11 | cobra.flux_analysis 12 | cobra.io 13 | cobra.manipulation 14 | cobra.topology 15 | 16 | Module contents 17 | --------------- 18 | 19 | .. automodule:: cobra 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /cobra/manipulation/__init__.py: -------------------------------------------------------------------------------- 1 | from .delete import delete_model_genes, undelete_model_genes, remove_genes, \ 2 | find_gene_knockout_reactions 3 | from .modify import initialize_growth_medium, convert_to_irreversible, \ 4 | revert_to_reversible, escape_ID, canonical_form, \ 5 | get_compiled_gene_reaction_rules 6 | from .annotate import add_SBO 7 | from .validate import check_mass_balance, check_reaction_bounds, \ 8 | check_metabolite_compartment_formula 9 | -------------------------------------------------------------------------------- /documentation_builder/cobra.design.rst: -------------------------------------------------------------------------------- 1 | cobra.design package 2 | ==================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cobra.design.design_algorithms module 8 | ------------------------------------- 9 | 10 | .. automodule:: cobra.design.design_algorithms 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | 16 | Module contents 17 | --------------- 18 | 19 | .. automodule:: cobra.design 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /documentation_builder/cobra.topology.rst: -------------------------------------------------------------------------------- 1 | cobra.topology package 2 | ====================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cobra.topology.reporter_metabolites module 8 | ------------------------------------------ 9 | 10 | .. automodule:: cobra.topology.reporter_metabolites 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | 16 | Module contents 17 | --------------- 18 | 19 | .. 
automodule:: cobra.topology 20 | :members: 21 | :undoc-members: 22 | :show-inheritance: 23 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/jython/scipy/sparse/__init__.py: -------------------------------------------------------------------------------- 1 | "Sparse Matrix Support" 2 | 3 | from info import __doc__ 4 | 5 | from base import * 6 | from csr import * 7 | from csc import * 8 | from lil import * 9 | from dok import * 10 | from coo import * 11 | from dia import * 12 | from bsr import * 13 | 14 | from construct import * 15 | from extract import * 16 | 17 | #from spfuncs import * 18 | 19 | __all__ = filter(lambda s:not s.startswith('_'),dir()) 20 | from numpy.testing import Tester 21 | test = Tester().test 22 | bench = Tester().bench 23 | -------------------------------------------------------------------------------- /cobra/core/__init__.py: -------------------------------------------------------------------------------- 1 | from .DictList import DictList 2 | from .Object import Object 3 | from .Gene import Gene 4 | from .Metabolite import Metabolite 5 | from .Reaction import Reaction 6 | from .Solution import Solution 7 | from .Model import Model 8 | from .Species import Species 9 | 10 | try: 11 | import scipy 12 | except: 13 | scipy = None 14 | 15 | if scipy: 16 | from .ArrayBasedModel import ArrayBasedModel 17 | else: 18 | from warnings import warn 19 | warn("ArrayBasedModel requires scipy") 20 | del warn 21 | del scipy 22 | -------------------------------------------------------------------------------- /cobra/io/__init__.py: -------------------------------------------------------------------------------- 1 | from warnings import warn 2 | 3 | from .sbml3 import read_sbml_model, write_sbml_model 4 | from .json import load_json_model, save_json_model, to_json 5 | 6 | # These functions have other dependencies 7 | try: 8 | import libsbml 9 | except ImportError: 10 | warn("cobra.io.sbml requires libsbml") 11 | libsbml = None 12 | else: 13 | from .sbml import read_legacy_sbml 14 | from .sbml import write_cobra_model_to_sbml_file as write_legacy_sbml 15 | 16 | try: 17 | import scipy 18 | except ImportError: 19 | warn("cobra.io.mat requires scipy") 20 | scipy = None 21 | else: 22 | from .mat import load_matlab_model, save_matlab_model 23 | 24 | del libsbml, scipy, warn 25 | -------------------------------------------------------------------------------- /cobra/core/Object.py: -------------------------------------------------------------------------------- 1 | class Object(object): 2 | """Defines common behavior of object in cobra.core""" 3 | 4 | def __init__(self, id=None, name=""): 5 | """ 6 | id: None or a string 7 | 8 | """ 9 | self.id = id 10 | self.name = name 11 | 12 | self.notes = {} 13 | self.annotation = {} 14 | 15 | def __getstate__(self): 16 | """To prevent excessive replication during deepcopy.""" 17 | state = self.__dict__.copy() 18 | if '_model' in state: 19 | state['_model'] = None 20 | return state 21 | 22 | def __repr__(self): 23 | return "<%s %s at 0x%x>" % (self.__class__.__name__, self.id, id(self)) 24 | 25 | def __str__(self): 26 | return str(self.id) 27 | -------------------------------------------------------------------------------- /cobra/flux_analysis/__init__.py: -------------------------------------------------------------------------------- 1 | try: 2 | import numpy 3 | except: 4 | numpy = None 5 | 6 | from .essentiality import assess_medium_component_essentiality 7 | from .variability import 
flux_variability_analysis, find_blocked_reactions 8 | from .single_deletion import single_gene_deletion, single_reaction_deletion 9 | from .parsimonious import optimize_minimal_flux 10 | from .loopless import construct_loopless_model 11 | from .gapfilling import growMatch 12 | 13 | if numpy: 14 | from .double_deletion import double_reaction_deletion, double_gene_deletion 15 | from .phenotype_phase_plane import calculate_phenotype_phase_plane 16 | else: 17 | from warnings import warn 18 | warn("double deletions and phase planes requires numpy") 19 | del warn 20 | del numpy 21 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/construction/omics_guided.py: -------------------------------------------------------------------------------- 1 | #cobra.manipulation.omics_guided.py 2 | #The functions for omics_guided tailoring will be kept here. 3 | def tailor_model(cobra_model, the_method='GIMME', data_type='mRNA', data_kind='log_ratio', 4 | solver='glpk', the_problem='return' ): 5 | """ 6 | 7 | the_method: Type of tailoring to employ. GIMME or shlomi. 8 | data_type: 'mRNA', 'protein', 'metabolite', ... 9 | data_kind: 'p-value','log_ratio': assumed vs control, 'intensity' 10 | solver: 'glpk' or 'gurobi' 11 | 12 | 13 | """ 14 | cobra_model = cobra_model.copy() 15 | print 'Under development' 16 | return 17 | 18 | #function [reactionActivity,reactionActivityIrrev,model2gimme,gimmeSolution] = solveGimme(model,objectiveCol,expressionCol,cutoff) 19 | -------------------------------------------------------------------------------- /documentation_builder/index.rst: -------------------------------------------------------------------------------- 1 | Documentation for COBRApy 2 | ========================= 3 | For installation instructions, please see `INSTALL.md 4 | `_. 5 | 6 | Many of the examples below are viewable as IPython notebooks, which can 7 | be viewed at `nbviewer 8 | `_. 9 | 10 | .. toctree:: 11 | :numbered: 12 | :maxdepth: 2 13 | 14 | getting_started 15 | building_model 16 | io 17 | simulating 18 | deletions 19 | phenotype_phase_plane 20 | milp 21 | qp 22 | loopless 23 | gapfilling 24 | solvers 25 | pymatbridge 26 | faq 27 | cobra 28 | 29 | 30 | Indices and tables 31 | ================== 32 | 33 | * :ref:`genindex` 34 | * :ref:`modindex` 35 | * :ref:`search` 36 | 37 | -------------------------------------------------------------------------------- /documentation_builder/cobra.io.rst: -------------------------------------------------------------------------------- 1 | cobra.io package 2 | ================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | cobra.io.json module 8 | -------------------- 9 | 10 | .. automodule:: cobra.io.json 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cobra.io.mat module 16 | ------------------- 17 | 18 | .. automodule:: cobra.io.mat 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cobra.io.sbml module 24 | -------------------- 25 | 26 | .. automodule:: cobra.io.sbml 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cobra.io.sbml3 module 32 | --------------------- 33 | 34 | .. automodule:: cobra.io.sbml3 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | 40 | Module contents 41 | --------------- 42 | 43 | .. 
automodule:: cobra.io 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | -------------------------------------------------------------------------------- /cobra/manipulation/annotate.py: -------------------------------------------------------------------------------- 1 | from six import iteritems 2 | 3 | 4 | def add_SBO(model): 5 | """adds SBO terms for demands and exchanges 6 | 7 | This works for models which follow the standard convention for 8 | constructing and naming these reactions. 9 | 10 | The reaction should only contain the single metabolite being exchanged, 11 | and the id should be EX_metid or DM_metid 12 | """ 13 | for r in model.reactions: 14 | # don't annotate already annotated reactions 15 | if r.annotation.get("SBO"): 16 | continue 17 | # only doing exchanges 18 | if len(r.metabolites) != 1: 19 | continue 20 | met_id = list(r._metabolites)[0].id 21 | if r.id.startswith("EX_") and r.id == "EX_" + met_id: 22 | r.annotation["SBO"] = "SBO:0000627" 23 | elif r.id.startswith("DM_") and r.id == "DM_" + met_id: 24 | r.annotation["SBO"] = "SBO:0000628" 25 | -------------------------------------------------------------------------------- /cobra/__init__.py: -------------------------------------------------------------------------------- 1 | # set the warning format to be on a single line 2 | import warnings as _warnings 3 | from os.path import abspath as _abspath, dirname as _dirname 4 | from os import name as _name 5 | 6 | from .version import get_version 7 | from .core import Object, Metabolite, Gene, Reaction, Model, \ 8 | DictList, Species 9 | from . import io, flux_analysis, design 10 | 11 | try: 12 | from .core import ArrayBasedModel 13 | except ImportError: 14 | None 15 | 16 | __version__ = get_version() 17 | del get_version 18 | 19 | # set the warning format to be prettier and fit on one line 20 | _cobra_path = _dirname(_abspath(__file__)) 21 | if _name == "posix": 22 | _warning_base = "%s:%s \x1b[1;31m%s\x1b[0m: %s\n" # colors 23 | else: 24 | _warning_base = "%s:%s %s: %s\n" 25 | 26 | 27 | def _warn_format(message, category, filename, lineno, file=None, line=None): 28 | shortname = filename.replace(_cobra_path, "cobra", 1) 29 | return _warning_base % (shortname, lineno, category.__name__, message) 30 | _warnings.formatwarning = _warn_format 31 | -------------------------------------------------------------------------------- /documentation_builder/cobra.manipulation.rst: -------------------------------------------------------------------------------- 1 | cobra.manipulation package 2 | ========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cobra.manipulation.annotate module 8 | ---------------------------------- 9 | 10 | .. automodule:: cobra.manipulation.annotate 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cobra.manipulation.delete module 16 | -------------------------------- 17 | 18 | .. automodule:: cobra.manipulation.delete 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cobra.manipulation.modify module 24 | -------------------------------- 25 | 26 | .. automodule:: cobra.manipulation.modify 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cobra.manipulation.validate module 32 | ---------------------------------- 33 | 34 | .. automodule:: cobra.manipulation.validate 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | 40 | Module contents 41 | --------------- 42 | 43 | .. 
automodule:: cobra.manipulation 44 | :members: 45 | :undoc-members: 46 | :show-inheritance: 47 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/general/arrays.py: -------------------------------------------------------------------------------- 1 | from numpy import ndarray 2 | class ResultsArray(ndarray): 3 | """A primitive wrapper to allow accessing numpy.ndarrays via 4 | named rows and columns. The ResultsArray.row_names and 5 | column_names must be assigned after the object is created. 6 | 7 | The names will not carry over for any operations. 8 | 9 | TODO: Finish the implementation 10 | 11 | """ 12 | def __init__(self, shape, row_names=None, column_names=None): 13 | ndarray.__init__(shape) 14 | if row_names: 15 | self.row_names = row_names 16 | else: 17 | self.row_names = range(shape[0]) 18 | if column_names: 19 | self.column_names = column_names 20 | else: 21 | column_names = range(shape[1]) 22 | def get(self, row_name=None, column_name=None): 23 | if row_name: 24 | the_row = self.row_names.index(row_name) 25 | if column_name: 26 | the_column = self.column_names.index(column_name) 27 | if row_name and column_name: 28 | return self[the_row, the_column] 29 | if not row_name: 30 | return self[:, the_column] 31 | if not column_name: 32 | return self[the_row, :] 33 | -------------------------------------------------------------------------------- /cobra/solvers/wrappers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Wrappers for solvers with an object oriented interface. This creates 3 | functions to call the objects' functions. 4 | 5 | The create_problem and solve functions are not included because they 6 | are classmethods. They should be included by specifying 7 | create_problem = PROBLEM_CLASS.create_problem 8 | where PROBLEM_CLASS is the solver class (i.e. GLP, esolver, etc.) 
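For example, a hypothetical solver module built around the bundled GLPK interface
might expose them like this (a sketch only; the import path and class name are
assumptions, not guaranteed by this module):

    from .cglpk import GLP

    create_problem = GLP.create_problem
    solve = GLP.solve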
9 | """ 10 | 11 | 12 | def set_objective_sense(lp, objective_sense="maximize"): 13 | return lp.set_objective_sense(lp, objective_sense=objective_sense) 14 | 15 | 16 | def change_variable_bounds(lp, *args): 17 | return lp.change_variable_bounds(*args) 18 | 19 | 20 | def change_variable_objective(lp, *args): 21 | return lp.change_variable_objective(*args) 22 | 23 | 24 | def change_coefficient(lp, *args): 25 | return lp.change_coefficient(*args) 26 | 27 | 28 | def set_parameter(lp, parameter_name, value): 29 | return lp.change_parameter(parameter_name, value) 30 | 31 | 32 | def solve_problem(lp, **kwargs): 33 | return lp.solve_problem(**kwargs) 34 | 35 | 36 | def get_status(lp): 37 | return lp.get_status() 38 | 39 | 40 | def get_objective_value(lp): 41 | return lp.get_objective_value() 42 | 43 | 44 | def format_solution(lp, cobra_model): 45 | return lp.format_solution(cobra_model) 46 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | # .coveragerc to control coverage.py 2 | [run] 3 | branch = True 4 | source = cobra 5 | omit = 6 | cobra/solvers/* 7 | cobra/test/data/* 8 | cobra/test_all.py 9 | cobra/version.py 10 | cobra/oven/* 11 | # deprecated code 12 | cobra/core/Formula.py 13 | # can not be run with free solver 14 | cobra/flux_analysis/moma.py 15 | 16 | 17 | [report] 18 | # Regexes for lines to exclude from consideration 19 | exclude_lines = 20 | # Have to re-enable the standard pragma 21 | pragma: no cover 22 | 23 | # Don't complain about missing debug-only code: 24 | def __repr__ 25 | if self\.debug 26 | print 27 | 28 | # Don't complain about missing legacy code 29 | _legacy 30 | 31 | # Don't complain if tests don't hit defensive assertion code: 32 | raise AssertionError 33 | raise NotImplementedError 34 | 35 | # Don't complain about code for importing 36 | import 37 | except ImportError 38 | 39 | # Don't complain if non-runnable code isn't run: 40 | if 0: 41 | if __name__ == .__main__.: 42 | 43 | # Don't complain about warnings 44 | warn 45 | 46 | partial_branches: 47 | pragma: no branch 48 | # Don't complain about missing legacy code 49 | _legacy 50 | 51 | ignore_errors = True 52 | 53 | [html] 54 | directory = coverage 55 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | sudo: false 3 | cache: 4 | directories: 5 | - $HOME/.cache/pip 6 | python: 7 | - "2.7" 8 | - "3.4" 9 | - "3.5" 10 | addons: 11 | apt: 12 | packages: 13 | - gfortran 14 | - libatlas-dev 15 | - libatlas-base-dev 16 | - liblapack-dev 17 | - libgmp-dev 18 | - libglpk-dev 19 | - libmpfr-dev 20 | 21 | # command to install dependencies 22 | env: 23 | - PIP_CACHE_DIR=$HOME/.cache/pip 24 | before_install: 25 | - pip install pip --upgrade 26 | # These get cached 27 | - pip install numpy scipy python-libsbml cython coveralls jsonschema six matplotlib pandas 28 | - if [[ $TRAVIS_PYTHON_VERSION == 2* ]]; then pip install lxml glpk pep8 palettable; fi 29 | # Download esolver and add it to the path 30 | - wget https://opencobra.github.io/pypi_cobrapy_travis/esolver.gz 31 | - gzip -d esolver.gz; chmod +x esolver; export PATH=$PATH:$PWD 32 | - mkdir -p $HOME/.config/matplotlib 33 | - "echo 'backend: Agg' >> $HOME/.config/matplotlib/matplotlibrc" 34 | install: 35 | - python setup.py develop 36 | # # command to run tests 37 | script: 38 | - coverage run 
--source=cobra setup.py test 39 | - if [[ $TRAVIS_PYTHON_VERSION == 2* ]]; then 40 | pep8 cobra --exclude=oven,solvers,sbml.py --show-source; 41 | fi 42 | after_success: 43 | - coveralls 44 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Contribution Guidelines 2 | ----------------------- 3 | 4 | Generally, the following practices are recommended for making contributions to 5 | cobrapy. These aren't all necessarily hard-and-fast rules, but should serve as 6 | guidelines in most cases. 7 | 8 | 1. Please comment code. 9 | 2. All new python code should be pep8 compliant. 10 | 3. Please use git best practices, with a 50 line summary for each commit. 11 | Generally, separate features should be made in separate commits so 12 | they can be tested and merged independently. For example, adding a new 13 | solver would be a separate commit from fixing whitespace in cobra.core. 14 | 4. Documentation is written as IPython/jupyter notebooks in the 15 | ```documentation_builder``` directory, which are then converted to 16 | rst by the ```autodoc.sh``` script. 17 | 5. Tests are in the ```cobra/test``` directory. They are automatically run 18 | through continuous integration services on both python 2 and python 3 19 | when pull requests are made. 20 | 6. Please write tests for new functions. Writing documentation as well 21 | would also be very helpful. 22 | 7. Ensure code will work with both python 2 and python 3. For example, 23 | instead of ```my_dict.iteritems()``` use ```six.iteritems(my_dict)``` 24 | 25 | Thank you very much for contributing to cobrapy. 26 | -------------------------------------------------------------------------------- /cobra/core/Solution.py: -------------------------------------------------------------------------------- 1 | class Solution(object): 2 | """Stores the solution from optimizing a cobra.Model. This is 3 | used to provide a single interface to results from different 4 | solvers that store their values in different ways. 5 | 6 | f: The objective value 7 | 8 | solver: A string indicating which solver package was used. 9 | 10 | x: List or Array of the values from the primal. 11 | 12 | x_dict: A dictionary of reaction ids that maps to the primal values. 13 | 14 | y: List or Array of the values from the dual. 15 | 16 | y_dict: A dictionary of reaction ids that maps to the dual values. 17 | 18 | """ 19 | 20 | def __init__(self, f, x=None, 21 | x_dict=None, y=None, y_dict=None, 22 | solver=None, the_time=0, status='NA'): 23 | self.solver = solver 24 | self.f = f 25 | self.x = x 26 | self.x_dict = x_dict 27 | self.status = status 28 | self.y = y 29 | self.y_dict = y_dict 30 | 31 | def dress_results(self, model): 32 | """.. 
warning :: deprecated""" 33 | from warnings import warn 34 | warn("unnecessary to call this deprecated function") 35 | 36 | def __repr__(self): 37 | if self.f is None: 38 | return "<Solution '%s' at 0x%x>" % (self.status, id(self)) 39 | return "<Solution %.2f at 0x%x>" % (self.f, id(self)) 40 | -------------------------------------------------------------------------------- /cobra/core/Species.py: -------------------------------------------------------------------------------- 1 | from warnings import warn 2 | from copy import deepcopy 3 | from .Object import Object 4 | 5 | 6 | class Species(Object): 7 | """Species is a class for holding information regarding 8 | a chemical Species 9 | 10 | 11 | """ 12 | 13 | def __init__(self, id=None, name=None): 14 | """ 15 | id: A string. 16 | 17 | name: String. A human readable name. 18 | 19 | """ 20 | Object.__init__(self, id, name) 21 | self._model = None 22 | # references to reactions that operate on this species 23 | self._reaction = set() 24 | 25 | @property 26 | def reactions(self): 27 | return frozenset(self._reaction) 28 | 29 | def __getstate__(self): 30 | """Remove the references to container reactions when serializing to 31 | avoid problems associated with recursion. 32 | 33 | """ 34 | state = Object.__getstate__(self) 35 | state['_reaction'] = set() 36 | return state 37 | 38 | def copy(self): 39 | """When copying a reaction, it is necessary to deepcopy the 40 | components so the list references aren't carried over. 41 | 42 | Additionally, a copy of a reaction is no longer in a cobra.Model. 43 | 44 | This should be fixed with self.__deepcopy__ if possible 45 | """ 46 | return deepcopy(self) 47 | 48 | @property 49 | def model(self): 50 | return(self._model) 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask instance folder 57 | instance/ 58 | 59 | # Scrapy stuff: 60 | .scrapy 61 | 62 | # Sphinx documentation 63 | docs/_build/ 64 | 65 | # PyBuilder 66 | target/ 67 | 68 | # IPython Notebook 69 | .ipynb_checkpoints 70 | 71 | # pyenv 72 | .python-version 73 | 74 | # dotenv 75 | .env 76 | 77 | # custom 78 | gurobi.log 79 | documentation 80 | documentation_builder/test*\.* 81 | examples/faq.py 82 | cobra.egg-info 83 | setuptools-*egg 84 | setuptools-*.tar.gz 85 | cobra/solvers/cglpk.c 86 | glpk.h 87 | libglpk.a 88 | .idea/ 89 | .DS_Store 90 | .eggs/ 91 | *\.swp 92 | manylinux_builder/wheelhouse 93 | -------------------------------------------------------------------------------- /cobra/oven/README: -------------------------------------------------------------------------------- 1 | This is the location for self-contained add-ons that are currently baking. Please try to organize your buns into rational categories. Occasionally, I will reorganize items in a logical fashion as the module set grows and my vision develops. 2 | 3 | If you want to contribute to the oven, please create a directory with your sourceforge username and put all files in it. 4 | 5 | 6 | For modifications to the pre-existing code, please continue to use the patch tracker or contact the sourceforge username associated with the files. 7 | 8 | Please follow the python style guide 9 | http://www.python.org/dev/peps/pep-0008/ 10 | and document thoroughly. Modules that deviate from these two commandments will not make it into the core and may result in revocation of svn privileges. 11 | 12 | 13 | Also, remember the following: 14 | 1) Document thoroughly. The world is much bigger than you really seem to think. 15 | 2) No camelCase or camelToes no matter who sports them. The exception: classes must be defined with CamelToeCase. 16 | 3) No non-standard lazy abbreviations kegg is acceptable mets, rxns, and 17 | coefs are not. 18 | 4) phrasesmusthaveunderscores is bad phrases_must_have_underscores is 19 | good 20 | 5) For default values in functions don't put spaces around the =. def 21 | my_cat(color = 'black') is bad. def my_cat(color='black') is good. 22 | 6) Also, unless absolutely necessary you should import functions from modules and not use the whole path name. This allows for faster transitions if we need to change an upstream module or want to maintain python and jython compatibility, or want to test some new package. 23 | 24 | If a function something requires a very specific set of data files then it 25 | is best to make a module. 26 | 27 | 28 | Happy coding! 29 | 30 | Dr. S. 
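A tiny illustration of the conventions above (the names are made up for illustration only):

    class FluxReporter(object):  # classes are written in CamelCase
        def count_active_reactions(self, flux_cutoff=1e-6):
            # functions_and_variables use underscores; no spaces around '=' in defaults
            return 0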
31 | -------------------------------------------------------------------------------- /appveyor/build_glpk.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import hashlib 4 | import tarfile 5 | import struct 6 | import shutil 7 | import setuptools.msvc9_support 8 | try: 9 | import urllib2 10 | except ImportError: # python 3 11 | import urllib.request as urllib2 12 | 13 | # these need to be set to the latest glpk version 14 | glpk_version = "4.60" 15 | glpk_md5 = "eda7965907f6919ffc69801646f13c3e" 16 | 17 | glpk_build_dir = "glpk_build/glpk-%s" % glpk_version 18 | url = "http://ftp.gnu.org/gnu/glpk/glpk-%s.tar.gz" % glpk_version 19 | bitness = struct.calcsize("P") * 8 20 | 21 | 22 | def md5(fname): 23 | hash = hashlib.md5() 24 | with open(fname, "rb") as f: 25 | for chunk in iter(lambda: f.read(4096), b""): 26 | hash.update(chunk) 27 | return hash.hexdigest() 28 | 29 | 30 | def get_vcvarsall_cmd(): 31 | py_ver = sys.version_info 32 | if py_ver.major == 3 and py_ver.minor >= 5: 33 | vc_ver = 14 34 | elif py_ver.major == 3 and py_ver.minor >= 3: 35 | vc_ver = 10 36 | else: 37 | vc_ver = 9 38 | vc_path = setuptools.msvc9_support.find_vcvarsall(vc_ver) 39 | assert vc_path is not None 40 | return '"%s" %s' % (vc_path, " amd64" if bitness == 64 else "") 41 | 42 | 43 | if not os.path.isdir("glpk_build/"): 44 | os.mkdir("glpk_build") 45 | if not os.path.isdir(glpk_build_dir): 46 | response = urllib2.urlopen(url) 47 | with open("glpk-download.tar.gz", "wb") as outfile: 48 | outfile.write(response.read()) 49 | assert md5("glpk-download.tar.gz") == glpk_md5 50 | with tarfile.open("glpk-download.tar.gz") as infile: 51 | infile.extractall("glpk_build") 52 | 53 | os.chdir("%s/w%d" % (glpk_build_dir, bitness)) 54 | if not os.path.isfile("glpk.lib"): 55 | shutil.copy2("config_VC", "config.h") 56 | os.system(get_vcvarsall_cmd() + "& nmake /f Makefile_VC") 57 | shutil.copy2("glpk.lib", "../../..") 58 | os.chdir("../../..") 59 | shutil.copy2(glpk_build_dir + "/src/glpk.h", ".") 60 | -------------------------------------------------------------------------------- /documentation_builder/cobra.core.rst: -------------------------------------------------------------------------------- 1 | cobra.core package 2 | ================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cobra.core.ArrayBasedModel module 8 | --------------------------------- 9 | 10 | .. automodule:: cobra.core.ArrayBasedModel 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cobra.core.DictList module 16 | -------------------------- 17 | 18 | .. automodule:: cobra.core.DictList 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cobra.core.Formula module 24 | ------------------------- 25 | 26 | .. automodule:: cobra.core.Formula 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cobra.core.Gene module 32 | ---------------------- 33 | 34 | .. automodule:: cobra.core.Gene 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | cobra.core.Metabolite module 40 | ---------------------------- 41 | 42 | .. automodule:: cobra.core.Metabolite 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | cobra.core.Model module 48 | ----------------------- 49 | 50 | .. automodule:: cobra.core.Model 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | cobra.core.Object module 56 | ------------------------ 57 | 58 | .. 
automodule:: cobra.core.Object 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | cobra.core.Reaction module 64 | -------------------------- 65 | 66 | .. automodule:: cobra.core.Reaction 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | cobra.core.Solution module 72 | -------------------------- 73 | 74 | .. automodule:: cobra.core.Solution 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | cobra.core.Species module 80 | ------------------------- 81 | 82 | .. automodule:: cobra.core.Species 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | 88 | Module contents 89 | --------------- 90 | 91 | .. automodule:: cobra.core 92 | :members: 93 | :undoc-members: 94 | :show-inheritance: 95 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | environment: 2 | 3 | global: 4 | # SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the 5 | # /E:ON and /V:ON options are not enabled in the batch script intepreter 6 | # See: http://stackoverflow.com/a/13751649/163740 7 | WITH_COMPILER: "cmd /E:ON /V:ON /C .\\appveyor\\run_with_env.cmd" 8 | PIP_CACHE_DIR: "pip_cache" 9 | 10 | matrix: 11 | - PYTHON: "C:\\Python27" 12 | PYTHON_VERSION: "2.7.12" 13 | PYTHON_ARCH: "32" 14 | 15 | - PYTHON: "C:\\Python34" 16 | PYTHON_VERSION: "3.4.5" 17 | PYTHON_ARCH: "32" 18 | 19 | - PYTHON: "C:\\Python35" 20 | PYTHON_VERSION: "3.5.2" 21 | PYTHON_ARCH: "32" 22 | 23 | - PYTHON: "C:\\Python27-x64" 24 | PYTHON_VERSION: "2.7.12" 25 | PYTHON_ARCH: "64" 26 | 27 | - PYTHON: "C:\\Python34-x64" 28 | PYTHON_VERSION: "3.4.5" 29 | PYTHON_ARCH: "64" 30 | 31 | - PYTHON: "C:\\Python35-x64" 32 | PYTHON_VERSION: "3.5.2" 33 | PYTHON_ARCH: "64" 34 | 35 | clone_depth: 25 36 | 37 | init: 38 | - "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%bit" 39 | 40 | cache: 41 | - glpk_build -> appveyor/build_glpk.py 42 | - pip_cache -> appveyor.yml 43 | 44 | 45 | install: 46 | - "powershell appveyor\\install.ps1" 47 | - ps: Start-FileDownload 'https://bitbucket.org/gutworth/six/raw/default/six.py' 48 | - "%WITH_COMPILER% %PYTHON%/python appveyor/build_glpk.py" 49 | - "%PYTHON%/python -m pip install pip setuptools wheel --upgrade" 50 | - "%PYTHON%/python -m pip install Cython jsonschema twine pypandoc==1.1.3" 51 | 52 | build: off 53 | 54 | test_script: 55 | - "%WITH_COMPILER% %PYTHON%/python setup.py test" 56 | 57 | after_test: 58 | - "%WITH_COMPILER% %PYTHON%/python setup.py bdist_wheel bdist_wininst" 59 | 60 | artifacts: 61 | - path: dist\* 62 | 63 | deploy_script: 64 | - ps: >- 65 | if($env:appveyor_repo_tag -eq 'True') { 66 | Invoke-Expression "$env:PYTHON/Scripts/twine upload dist/* --username $env:PYPI_USERNAME --password $env:PYPI_PASSWORD" 67 | } 68 | 69 | #on_success: 70 | # - TODO: upload the content of dist/*.whl to a public wheelhouse 71 | -------------------------------------------------------------------------------- /cobra/test/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from os.path import join, abspath, dirname 3 | import unittest as _unittest 4 | 5 | try: 6 | from cPickle import load as _load 7 | except: 8 | from pickle import load as _load 9 | 10 | from ..io import read_sbml_model 11 | 12 | 13 | available_tests = ['unit_tests', 'solvers', 'flux_analysis', 'io_tests', 14 | 'design', 'manipulation'] 15 | 16 | 17 | cobra_directory = 
abspath(join(dirname(abspath(__file__)), "..")) 18 | cobra_location = abspath(join(cobra_directory, "..")) 19 | data_directory = join(cobra_directory, "test", "data", "") 20 | 21 | salmonella_sbml = join(data_directory, "salmonella.xml") 22 | salmonella_pickle = join(data_directory, "salmonella.pickle") 23 | 24 | ecoli_sbml = join(data_directory, "iJO1366.xml") 25 | textbook_sbml = join(data_directory, "textbook.xml.gz") 26 | mini_sbml = join(data_directory, "mini_fbc2.xml") 27 | 28 | del abspath, join, dirname 29 | 30 | 31 | def create_test_model(model_name="salmonella"): 32 | """Returns a cobra model for testing 33 | 34 | model_name: str 35 | One of 'ecoli', 'textbook', or 'salmonella', or the 36 | path to a pickled cobra.Model 37 | 38 | """ 39 | 40 | if model_name == "ecoli": 41 | return read_sbml_model(ecoli_sbml) 42 | elif model_name == "textbook": 43 | return read_sbml_model(textbook_sbml) 44 | elif model_name == "mini": 45 | return read_sbml_model(mini_sbml) 46 | 47 | if model_name == "salmonella": 48 | model_name = salmonella_pickle 49 | with open(model_name, "rb") as infile: 50 | return _load(infile) 51 | 52 | 53 | def create_test_suite(): 54 | """create a unittest.TestSuite with available tests""" 55 | loader = _unittest.TestLoader() 56 | suite = _unittest.TestSuite() 57 | for test_name in available_tests: 58 | exec("from . import " + test_name) 59 | suite.addTests(loader.loadTestsFromModule(eval(test_name))) 60 | return suite 61 | 62 | suite = create_test_suite() 63 | 64 | 65 | def test_all(): 66 | """###running unit tests on cobra py###""" 67 | status = not _unittest.TextTestRunner(verbosity=2).run( 68 | create_test_suite() 69 | ).wasSuccessful() 70 | return status 71 | -------------------------------------------------------------------------------- /cobra/manipulation/validate.py: -------------------------------------------------------------------------------- 1 | from math import isinf, isnan 2 | 3 | NOT_MASS_BALANCED_TERMS = {"SBO:0000627", # EXCHANGE 4 | "SBO:0000628", # DEMAND 5 | "SBO:0000629", # BIOMASS 6 | "SBO:0000631", # PSEUDOREACTION 7 | "SBO:0000632", # SINK 8 | } 9 | 10 | 11 | def check_mass_balance(model): 12 | warnings = [] 13 | unbalanced = {} 14 | for reaction in model.reactions: 15 | if reaction.annotation.get("SBO") not in NOT_MASS_BALANCED_TERMS: 16 | balance = reaction.check_mass_balance() 17 | if balance: 18 | unbalanced[reaction] = balance 19 | return unbalanced 20 | 21 | 22 | def check_reaction_bounds(model): 23 | errors = [] 24 | for reaction in model.reactions: 25 | if reaction.lower_bound > reaction.upper_bound: 26 | errors.append("Reaction '%s' has lower bound > upper bound" % 27 | reaction.id) 28 | if isinf(reaction.lower_bound): 29 | errors.append("Reaction '%s' has infinite lower_bound" % 30 | reaction.id) 31 | elif isnan(reaction.lower_bound): 32 | errors.append("Reaction '%s' has NaN for lower_bound" % 33 | reaction.id) 34 | if isinf(reaction.upper_bound): 35 | errors.append("Reaction '%s' has infinite upper_bound" % 36 | reaction.id) 37 | elif isnan(reaction.upper_bound): 38 | errors.append("Reaction '%s' has NaN for upper_bound" % 39 | reaction.id) 40 | return errors 41 | 42 | 43 | def check_metabolite_compartment_formula(model): 44 | errors = [] 45 | for met in model.metabolites: 46 | if met.compartment is not None and \ 47 | met.compartment not in model.compartments: 48 | errors.append("Metabolite '%s' compartment '%s' not found" % 49 | (met.id, met.compartment)) 50 | if met.formula is not None and len(met.formula) > 0: 51 | if not 
met.formula.isalnum(): 52 | errors.append("Metabolite '%s' formula '%s' not alphanumeric" % 53 | (met.id, met.formula)) 54 | return errors 55 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/query/query.py: -------------------------------------------------------------------------------- 1 | #cobra.query.query.py 2 | #Will serve as a location to house the growing number of 3 | #simple query functions attached to cobra.Model 4 | 5 | #NOTE: Many of the find functions are gone because Reactions, 6 | #Metabolites, and Genes are now away of each other. 7 | 8 | import re 9 | ##### 10 | def print_reactions_involving_metabolite(cobra_model, the_metabolites): 11 | """Update to allow for multiple metabolite search 12 | 13 | cobra_model: A cobra.Model object 14 | 15 | the_metabolites: A list of cobra.Metabolites or metabolite ids that are in 16 | cobra_metabolites. 17 | 18 | #TODO: Move this to the Metabolite class 19 | 20 | """ 21 | if hasattr(the_metabolites, 'id'): 22 | the_metabolites = [the_metabolites] 23 | elif not hasattr(the_metabolites, '__iter__'): 24 | the_metabolites = [the_metabolites] 25 | if not hasattr(the_metabolites[0], 'id'): 26 | the_metabolites = [cobra_model.metabolites[cobra_model.metabolites.index(x)] 27 | for x in the_metabolites] 28 | 29 | for the_metabolite in the_metabolties: 30 | for the_reaction in the_metabolite._reaction: 31 | print the_reaction.reaction 32 | 33 | 34 | def get_translation_reactions(cobra_model, genes_of_interest): 35 | """Find the translation elongation reactions for a set of genes 36 | in a cobra model. Related to ME-model extensions 37 | 38 | cobra_model: A cobra.Model object. 39 | 40 | genes_of_interest: A list of genes from cobra_model.genes. 41 | 42 | """ 43 | gene_translation_reactions = defaultdict(list) 44 | for the_reaction in cobra_model.reactions: 45 | if 'translation_elongation' in the_reaction: 46 | for the_gene in genes_of_interest: 47 | if the_gene in the_reaction: 48 | gene_translation_reactions[the_gene].append(the_reaction) 49 | continue 50 | return gene_translation_reactions 51 | 52 | 53 | if __name__ == '__main__': 54 | from cPickle import load 55 | from time import time 56 | solver = 'glpk' 57 | test_directory = '../test/data/' 58 | with open(test_directory + 'salmonella.pickle') as in_file: 59 | cobra_model = load(in_file) 60 | 61 | #TODO: Add in tests for each function 62 | print 'Need to add in tests for %s'%repr(['print_reactions_involving_metabolite']) 63 | 64 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | cobrapy 2 | ======= 3 | [![Build Status](https://travis-ci.org/opencobra/cobrapy.svg?branch=master)](https://travis-ci.org/opencobra/cobrapy) 4 | [![Coverage Status](https://coveralls.io/repos/opencobra/cobrapy/badge.svg?branch=master&service=github)](https://coveralls.io/github/opencobra/cobrapy?branch=master) 5 | [![Build status](https://ci.appveyor.com/api/projects/status/2o549lhjyukke8nd/branch/master?svg=true)](https://ci.appveyor.com/project/aebrahim/cobrapy/branch/master) 6 | [![PyPI](https://img.shields.io/pypi/v/cobra.svg)](https://pypi.python.org/pypi/cobra) 7 | 8 | 9 | COnstraint-Based Reconstruction and Analysis (COBRA) methods are widely used 10 | for genome-scale modeling of metabolic networks in both prokaryotes and 11 | eukaryotes. 
COBRApy is a constraint-based modeling package that is designed to 12 | accommodate the biological complexity of the next generation of COBRA models and 13 | provides access to commonly used COBRA methods, such as flux balance analysis, 14 | flux variability analysis, and gene deletion analyses. 15 | 16 | To install, please follow the [instructions](INSTALL.md). 17 | 18 | The documentation is browseable online at 19 | [readthedocs](https://cobrapy.readthedocs.org/en/stable/) 20 | and can also be 21 | [downloaded](https://readthedocs.org/projects/cobrapy/downloads/). 22 | 23 | Please use the [Google Group](http://groups.google.com/group/cobra-pie) for 24 | help. More information about opencobra is available at the 25 | [website](http://opencobra.github.io/). 26 | 27 | If you use cobrapy in a scientific publication, please cite 28 | [doi:10.1186/1752-0509-7-74](http://dx.doi.org/doi:10.1186/1752-0509-7-74) 29 | 30 | License 31 | ------- 32 | The cobrapy source is released under both the GPL and LGPL licenses. You may 33 | choose which license you choose to use the software under. However, please note 34 | that binary packages which include GLPK (such as the binary wheels distributed 35 | at https://pypi.python.org/pypi/cobra) will be bound by its license as well. 36 | 37 | This program is free software: you can redistribute it and/or modify it under 38 | the terms of the GNU General Public License or the Lesser GNU General Public 39 | License as published by the Free Software Foundation, either version 2 of the 40 | License, or (at your option) any later version. 41 | 42 | This program is distributed in the hope that it will be useful, but WITHOUT ANY 43 | WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A 44 | PARTICULAR PURPOSE. See the GNU General Public License for more details. 45 | 46 | -------------------------------------------------------------------------------- /cobra/test/data/invalid1.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /cobra/flux_analysis/loopless.py: -------------------------------------------------------------------------------- 1 | from ..core import Reaction, Metabolite 2 | from ..manipulation.modify import convert_to_irreversible 3 | from six import iteritems 4 | 5 | 6 | def construct_loopless_model(cobra_model): 7 | """construct a loopless model 8 | 9 | This adds MILP constraints to prevent flux from proceeding in a loop, as 10 | done in http://dx.doi.org/10.1016/j.bpj.2010.12.3707 11 | Please see the documentation for an explanation of the algorithm. 12 | 13 | This must be solved with an MILP capable solver. 
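A minimal usage sketch (hypothetical; it assumes the bundled textbook test model
is available and that an MILP-capable solver such as cplex or gurobi is installed):

    from cobra.test import create_test_model
    from cobra.flux_analysis.loopless import construct_loopless_model

    model = create_test_model("textbook")
    loopless_model = construct_loopless_model(model)
    solution = loopless_model.optimize(solver="cplex")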
14 | 15 | """ 16 | # copy the model and make it irreversible 17 | model = cobra_model.copy() 18 | convert_to_irreversible(model) 19 | max_ub = max(model.reactions.list_attr("upper_bound")) 20 | # a dict for storing S^T 21 | thermo_stoic = {"thermo_var_" + metabolite.id: {} 22 | for metabolite in model.metabolites} 23 | # Slice operator is so that we don't get newly added metabolites 24 | original_metabolites = model.metabolites[:] 25 | for reaction in model.reactions[:]: 26 | # Boundary reactions are not subjected to these constraints 27 | if len(reaction._metabolites) == 1: 28 | continue 29 | # populate the S^T dict 30 | bound_id = "thermo_bound_" + reaction.id 31 | for met, stoic in iteritems(reaction._metabolites): 32 | thermo_stoic["thermo_var_" + met.id][bound_id] = stoic 33 | # I * 1000 > v --> I * 1000 - v > 0 34 | reaction_ind = Reaction(reaction.id + "_indicator") 35 | reaction_ind.variable_kind = "integer" 36 | reaction_ind.upper_bound = 1 37 | reaction_ub = Metabolite(reaction.id + "_ind_ub") 38 | reaction_ub._constraint_sense = "G" 39 | reaction.add_metabolites({reaction_ub: -1}) 40 | reaction_ind.add_metabolites({reaction_ub: max_ub}) 41 | # This adds a compensating term for 0 flux reactions, so we get 42 | # S^T x - (1 - I) * 1001 < -1 which becomes 43 | # S^T x < 1000 for 0 flux reactions and 44 | # S^T x < -1 for reactions with nonzero flux. 45 | reaction_bound = Metabolite(bound_id) 46 | reaction_bound._constraint_sense = "L" 47 | reaction_bound._bound = max_ub 48 | reaction_ind.add_metabolites({reaction_bound: max_ub + 1}) 49 | model.add_reaction(reaction_ind) 50 | for metabolite in original_metabolites: 51 | metabolite_var = Reaction("thermo_var_" + metabolite.id) 52 | metabolite_var.lower_bound = -max_ub 53 | model.add_reaction(metabolite_var) 54 | metabolite_var.add_metabolites( 55 | {model.metabolites.get_by_id(k): v 56 | for k, v in iteritems(thermo_stoic[metabolite_var.id])}) 57 | return model 58 | -------------------------------------------------------------------------------- /documentation_builder/cobra.flux_analysis.rst: -------------------------------------------------------------------------------- 1 | cobra.flux_analysis package 2 | =========================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | cobra.flux_analysis.deletion_worker module 8 | ------------------------------------------ 9 | 10 | .. automodule:: cobra.flux_analysis.deletion_worker 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | cobra.flux_analysis.double_deletion module 16 | ------------------------------------------ 17 | 18 | .. automodule:: cobra.flux_analysis.double_deletion 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | cobra.flux_analysis.essentiality module 24 | --------------------------------------- 25 | 26 | .. automodule:: cobra.flux_analysis.essentiality 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | cobra.flux_analysis.gapfilling module 32 | ------------------------------------- 33 | 34 | .. automodule:: cobra.flux_analysis.gapfilling 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | cobra.flux_analysis.loopless module 40 | ----------------------------------- 41 | 42 | .. automodule:: cobra.flux_analysis.loopless 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | cobra.flux_analysis.moma module 48 | ------------------------------- 49 | 50 | .. 
automodule:: cobra.flux_analysis.moma 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | cobra.flux_analysis.parsimonious module 56 | --------------------------------------- 57 | 58 | .. automodule:: cobra.flux_analysis.parsimonious 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | cobra.flux_analysis.phenotype_phase_plane module 64 | ------------------------------------------------ 65 | 66 | .. automodule:: cobra.flux_analysis.phenotype_phase_plane 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | cobra.flux_analysis.reaction module 72 | ----------------------------------- 73 | 74 | .. automodule:: cobra.flux_analysis.reaction 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | cobra.flux_analysis.single_deletion module 80 | ------------------------------------------ 81 | 82 | .. automodule:: cobra.flux_analysis.single_deletion 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | cobra.flux_analysis.summary module 88 | ---------------------------------- 89 | 90 | .. automodule:: cobra.flux_analysis.summary 91 | :members: 92 | :undoc-members: 93 | :show-inheritance: 94 | 95 | cobra.flux_analysis.variability module 96 | -------------------------------------- 97 | 98 | .. automodule:: cobra.flux_analysis.variability 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | 103 | 104 | Module contents 105 | --------------- 106 | 107 | .. automodule:: cobra.flux_analysis 108 | :members: 109 | :undoc-members: 110 | :show-inheritance: 111 | -------------------------------------------------------------------------------- /documentation_builder/plot_helper.py: -------------------------------------------------------------------------------- 1 | from matplotlib.pyplot import figure, xlim, ylim, gca, arrow, text, scatter 2 | from mpl_toolkits.axes_grid.axislines import SubplotZero 3 | from numpy import linspace, arange, sqrt, pi, sin, cos, sign 4 | from IPython.display import set_matplotlib_formats 5 | 6 | set_matplotlib_formats('png', 'pdf') 7 | 8 | 9 | # axis style 10 | def make_plot_ax(): 11 | fig = figure(figsize=(6, 5)) 12 | ax = SubplotZero(fig, 111) 13 | fig.add_subplot(ax) 14 | for direction in ["xzero", "yzero"]: 15 | ax.axis[direction].set_axisline_style("-|>") 16 | ax.axis[direction].set_visible(True) 17 | for direction in ["left", "right", "bottom", "top"]: 18 | ax.axis[direction].set_visible(False) 19 | xlim(-0.1, 2.1) 20 | ylim(xlim()) 21 | ticks = [0.5 * i for i in range(1, 5)] 22 | labels = [str(i) if i == int(i) else "" for i in ticks] 23 | ax.set_xticks(ticks) 24 | ax.set_yticks(ticks) 25 | ax.set_xticklabels(labels) 26 | ax.set_yticklabels(labels) 27 | ax.axis["yzero"].set_axis_direction("left") 28 | return ax 29 | 30 | 31 | def plot_qp1(): 32 | ax = make_plot_ax() 33 | ax.plot((0, 2), (2, 0), 'b') 34 | ax.plot([1], [1], 'bo') 35 | 36 | # circular grid 37 | for r in sqrt(2.) + 0.125 * arange(-11, 6): 38 | t = linspace(0., pi/2., 100) 39 | ax.plot(r * cos(t), r * sin(t), '-.', color="gray") 40 | 41 | 42 | def plot_qp2(): 43 | ax = make_plot_ax() 44 | ax.plot((0, 2), (2, 0), 'b') 45 | ax.plot([0.5], [1.5], 'bo') 46 | 47 | yrange = linspace(1, 2, 11) 48 | for r in (yrange ** 2 / 2. 
- yrange): 49 | t = linspace(-sqrt(2 * r + 1) + 0.000001, 50 | sqrt(2 * r + 1) - 0.000001, 1000) 51 | ax.plot(abs(t), 1 + sqrt(2 * r + 1 - t ** 2) * sign(t), '-.', 52 | color="gray") 53 | 54 | 55 | def plot_loop(): 56 | figure(figsize=(10.5, 4.5), frameon=False) 57 | gca().axis("off") 58 | xlim(0.5, 3.5) 59 | ylim(0.7, 2.2) 60 | arrow_params = {"head_length": 0.08, "head_width": 0.1, "ec": "k", 61 | "fc": "k"} 62 | text_params = {"fontsize": 25, "horizontalalignment": "center", 63 | "verticalalignment": "center"} 64 | arrow(0.5, 1, 0.85, 0, **arrow_params) # EX_A 65 | arrow(1.5, 1, 0.425, 0.736, **arrow_params) # v1 66 | arrow(2.04, 1.82, 0.42, -0.72, **arrow_params) # v2 67 | arrow(2.4, 1, -0.75, 0, **arrow_params) # v3 68 | arrow(2.6, 1, 0.75, 0, **arrow_params) 69 | # reaction labels 70 | text(0.9, 1.15, "EX_A", **text_params) 71 | text(1.6, 1.5, r"v$_1$", **text_params) 72 | text(2.4, 1.5, r"v$_2$", **text_params) 73 | text(2, 0.85, r"v$_3$", **text_params) 74 | text(2.9, 1.15, "DM_C", **text_params) 75 | # metabolite labels 76 | scatter(1.5, 1, s=250, color='#c994c7') 77 | text(1.5, 0.9, "A", **text_params) 78 | scatter(2, 1.84, s=250, color='#c994c7') 79 | text(2, 1.95, "B", **text_params) 80 | scatter(2.5, 1, s=250, color='#c994c7') 81 | text(2.5, 0.9, "C", **text_params) 82 | -------------------------------------------------------------------------------- /appveyor/install.ps1: -------------------------------------------------------------------------------- 1 | # Sample script to install Python and pip under Windows 2 | # Authors: Olivier Grisel and Kyle Kastner 3 | # License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/ 4 | 5 | $BASE_URL = "https://www.python.org/ftp/python/" 6 | $GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py" 7 | $GET_PIP_PATH = "C:\get-pip.py" 8 | 9 | 10 | function DownloadPython ($python_version, $platform_suffix) { 11 | $webclient = New-Object System.Net.WebClient 12 | $filename = "python-" + $python_version + $platform_suffix + ".msi" 13 | $url = $BASE_URL + $python_version + "/" + $filename 14 | 15 | $basedir = $pwd.Path + "\" 16 | $filepath = $basedir + $filename 17 | if (Test-Path $filename) { 18 | Write-Host "Reusing" $filepath 19 | return $filepath 20 | } 21 | 22 | # Download and retry up to 5 times in case of network transient errors. 23 | Write-Host "Downloading" $filename "from" $url 24 | $retry_attempts = 3 25 | for($i=0; $i -lt $retry_attempts; $i++){ 26 | try { 27 | $webclient.DownloadFile($url, $filepath) 28 | break 29 | } 30 | Catch [Exception]{ 31 | Start-Sleep 1 32 | } 33 | } 34 | Write-Host "File saved at" $filepath 35 | return $filepath 36 | } 37 | 38 | 39 | function InstallPython ($python_version, $architecture, $python_home) { 40 | Write-Host "Installing Python" $python_version "for" $architecture "bit architecture to" $python_home 41 | if (Test-Path $python_home) { 42 | Write-Host $python_home "already exists, skipping." 
43 | return $false 44 | } 45 | if ($architecture -eq "32") { 46 | $platform_suffix = "" 47 | } else { 48 | $platform_suffix = ".amd64" 49 | } 50 | $filepath = DownloadPython $python_version $platform_suffix 51 | Write-Host "Installing" $filepath "to" $python_home 52 | $args = "/qn /i $filepath TARGETDIR=$python_home" 53 | Write-Host "msiexec.exe" $args 54 | Start-Process -FilePath "msiexec.exe" -ArgumentList $args -Wait -Passthru 55 | Write-Host "Python $python_version ($architecture) installation complete" 56 | return $true 57 | } 58 | 59 | 60 | function InstallPip ($python_home) { 61 | $pip_path = $python_home + "/Scripts/pip.exe" 62 | $python_path = $python_home + "/python.exe" 63 | if (-not(Test-Path $pip_path)) { 64 | Write-Host "Installing pip..." 65 | $webclient = New-Object System.Net.WebClient 66 | $webclient.DownloadFile($GET_PIP_URL, $GET_PIP_PATH) 67 | Write-Host "Executing:" $python_path $GET_PIP_PATH 68 | Start-Process -FilePath "$python_path" -ArgumentList "$GET_PIP_PATH" -Wait -Passthru 69 | } else { 70 | Write-Host "pip already installed." 71 | } 72 | } 73 | 74 | function InstallPackage ($python_home, $pkg) { 75 | $pip_path = $python_home + "/Scripts/pip.exe" 76 | & $pip_path install $pkg 77 | } 78 | 79 | function main () { 80 | InstallPython $env:PYTHON_VERSION $env:PYTHON_ARCH $env:PYTHON 81 | InstallPip $env:PYTHON 82 | InstallPackage $env:PYTHON wheel 83 | } 84 | 85 | main 86 | -------------------------------------------------------------------------------- /cobra/test/data/invalid0.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /cobra/flux_analysis/essentiality.py: -------------------------------------------------------------------------------- 1 | from warnings import warn 2 | try: 3 | from cobra.flux_analysis.moma import moma 4 | except: 5 | warn("moma does not appear to be functional on your system") 6 | from cobra.manipulation import initialize_growth_medium 7 | 8 | 9 | def assess_medium_component_essentiality(cobra_model, the_components=None, 10 | the_medium=None, 11 | medium_compartment='e', solver='glpk', 12 | the_condition=None, method='fba'): 13 | """Determines which components in an in silico medium are essential for 14 | growth in the context of the remaining components. 15 | 16 | cobra_model: A Model object. 17 | 18 | the_components: None or a list of external boundary reactions that will be 19 | sequentially disabled. 20 | 21 | the_medium: Is None, a string, or a dictionary. If a string then the 22 | initialize_growth_medium function expects that the_model has an attribute 23 | dictionary called media_compositions, which is a dictionary of dictionaries 24 | for various medium compositions. Where a medium composition is a 25 | dictionary of external boundary reaction ids for the medium components and 26 | the external boundary fluxes for each medium component. 27 | 28 | medium_compartment: the compartment in which the boundary reactions 29 | supplying the medium components exist 30 | 31 | NOTE: that these fluxes must be negative because the convention is 32 | backwards means something is feed into the system. 
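
    For example (illustrative only; the reaction id is taken from the bundled
    textbook model), a medium entry such as {'EX_glc__D_e': -10.} permits
    glucose uptake at a flux of up to 10.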
33 | 34 | solver: 'glpk', 'gurobi', or 'cplex' 35 | 36 | returns: essentiality_dict: A dictionary providing the maximum growth rate 37 | accessible when the respective component is removed from the medium. 38 | 39 | """ 40 | if method.lower() == 'moma': 41 | wt_model = cobra_model.copy() 42 | cobra_model = cobra_model.copy() 43 | 44 | if isinstance(the_medium, str): 45 | try: 46 | the_medium = cobra_model.media_compositions[the_medium] 47 | except: 48 | raise Exception( 49 | the_medium + " is not in cobra_model.media_compositions") 50 | if the_medium is not None: 51 | initialize_growth_medium(cobra_model, the_medium, medium_compartment) 52 | if the_components is None: 53 | the_components = the_medium.keys() 54 | if not the_components: 55 | raise Exception("You need to specify the_components or the_medium") 56 | essentiality_dict = {} 57 | for the_component in the_components: 58 | the_reaction = cobra_model.reactions.get_by_id(the_component) 59 | original_lower_bound = float(the_reaction.lower_bound) 60 | the_reaction.lower_bound = 0. 61 | if method.lower() == 'fba': 62 | cobra_model.optimize(solver=solver) 63 | objective_value = cobra_model.solution.f 64 | elif method.lower() == 'moma': 65 | objective_value = moma(wt_model, cobra_model, solver=solver)[ 66 | 'objective_value'] 67 | essentiality_dict[the_component] = objective_value 68 | the_reaction.lower_bound = original_lower_bound 69 | 70 | return(essentiality_dict) 71 | -------------------------------------------------------------------------------- /cobra/test/data/update_pickles.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # This script regenerates pickles of cobra Models. Should be 3 | # performed after updating core classes to prevent subtle bugs. 
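# A typical invocation (an assumption, not documented in the script itself):
# run it from this cobra/test/data/ directory so the relative paths below
# resolve, e.g.
#
#     python update_pickles.py
#
# The input files it reads (iJO1366.xml, salmonella.xml, salmonella.genes)
# must already be present here; they may need to be obtained separately.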
4 | try: 5 | from cPickle import load, dump 6 | except: 7 | from pickle import load, dump 8 | 9 | from json import dump as json_dump 10 | from collections import OrderedDict 11 | 12 | import cobra 13 | from cobra.version import get_version 14 | from cobra.io import read_sbml_model, write_sbml_model, save_matlab_model, \ 15 | save_json_model 16 | from cobra.io.sbml3 import write_sbml2 17 | 18 | # ecoli 19 | ecoli_model = read_sbml_model("iJO1366.xml") 20 | with open("iJO1366.pickle", "wb") as outfile: 21 | dump(ecoli_model, outfile, protocol=2) 22 | 23 | # salmonella 24 | salmonella = read_sbml_model("salmonella.xml") 25 | with open("salmonella.genes", "rb") as infile: 26 | gene_names = load(infile) 27 | for gene in salmonella.genes: 28 | gene.name = gene_names[gene.id] 29 | with open("salmonella.media", "rb") as infile: 30 | salmonella.media_compositions = load(infile) 31 | with open("salmonella.pickle", "wb") as outfile: 32 | dump(salmonella, outfile, protocol=2) 33 | 34 | # create mini model from textbook 35 | textbook = read_sbml_model("textbook.xml.gz") 36 | mini = cobra.Model("mini_textbook") 37 | mini.compartments = textbook.compartments 38 | 39 | 40 | for r in textbook.reactions: 41 | if r.id in ("GLCpts", "PGI", "PFK", "FBA", "TPI", "GAPD", "PGK", "PGM", 42 | "ENO", "PYK", "EX_glc__D_e", "EX_h_e", "H2Ot", "ATPM", 43 | "PIt2r"): 44 | mini.add_reaction(r.copy()) 45 | mini.reactions.ATPM.upper_bound = mini.reactions.PGI.upper_bound 46 | mini.change_objective("ATPM") # No biomass 47 | 48 | # add in some information from iJO1366 49 | mini.add_reaction(ecoli_model.reactions.LDH_D.copy()) 50 | mini.add_reaction(ecoli_model.reactions.EX_lac__D_e.copy()) 51 | r = cobra.Reaction("D_LACt2") 52 | mini.add_reaction(r) 53 | r.gene_reaction_rule = ecoli_model.reactions.D__LACt2pp.gene_reaction_rule 54 | r.reaction = ecoli_model.reactions.D__LACt2pp.reaction.replace("_p", "_e") 55 | mini.reactions.GLCpts.gene_reaction_rule = \ 56 | ecoli_model.reactions.GLCptspp.gene_reaction_rule 57 | 58 | # adjust bounds 59 | for i in ["ATPM", "D_LACt2", "EX_lac__D_e", "LDH_D"]: 60 | mini.reactions.get_by_id(i).upper_bound = mini.reactions.PGI.upper_bound 61 | for i in ["D_LACt2", "LDH_D"]: 62 | mini.reactions.get_by_id(i).lower_bound = mini.reactions.PGI.lower_bound 63 | # set names and annotation 64 | for g in mini.genes: 65 | try: 66 | tg = textbook.genes.get_by_id(g.id) 67 | except KeyError: 68 | continue 69 | g.name = tg.name 70 | g.annotation = tg.annotation 71 | mini.reactions.sort() 72 | mini.genes.sort() 73 | mini.metabolites.sort() 74 | # output to various formats 75 | with open("mini.pickle", "wb") as outfile: 76 | dump(mini, outfile, protocol=2) 77 | save_matlab_model(mini, "mini.mat") 78 | save_json_model(mini, "mini.json", pretty=True) 79 | write_sbml_model(mini, "mini_fbc2.xml") 80 | write_sbml_model(mini, "mini_fbc2.xml.bz2") 81 | write_sbml_model(mini, "mini_fbc2.xml.gz") 82 | write_sbml2(mini, "mini_fbc1.xml", use_fbc_package=True) 83 | write_sbml_model(mini, "mini_cobra.xml", use_fbc_package=False) 84 | 85 | # fva results 86 | fva_result = cobra.flux_analysis.flux_variability_analysis(textbook) 87 | clean_result = OrderedDict() 88 | for key in sorted(fva_result): 89 | clean_result[key] = {k: round(v, 5) for k, v in fva_result[key].items()} 90 | with open("textbook_fva.json", "w") as outfile: 91 | json_dump(clean_result, outfile) 92 | -------------------------------------------------------------------------------- /appveyor/run_with_env.cmd: 
-------------------------------------------------------------------------------- 1 | :: To build extensions for 64 bit Python 3, we need to configure environment 2 | :: variables to use the MSVC 2010 C++ compilers from GRMSDKX_EN_DVD.iso of: 3 | :: MS Windows SDK for Windows 7 and .NET Framework 4 (SDK v7.1) 4 | :: 5 | :: To build extensions for 64 bit Python 2, we need to configure environment 6 | :: variables to use the MSVC 2008 C++ compilers from GRMSDKX_EN_DVD.iso of: 7 | :: MS Windows SDK for Windows 7 and .NET Framework 3.5 (SDK v7.0) 8 | :: 9 | :: 32 bit builds, and 64-bit builds for 3.5 and beyond, do not require specific 10 | :: environment configurations. 11 | :: 12 | :: Note: this script needs to be run with the /E:ON and /V:ON flags for the 13 | :: cmd interpreter, at least for (SDK v7.0) 14 | :: 15 | :: More details at: 16 | :: https://github.com/cython/cython/wiki/64BitCythonExtensionsOnWindows 17 | :: http://stackoverflow.com/a/13751649/163740 18 | :: 19 | :: Author: Olivier Grisel 20 | :: License: CC0 1.0 Universal: http://creativecommons.org/publicdomain/zero/1.0/ 21 | :: 22 | :: Notes about batch files for Python people: 23 | :: 24 | :: Quotes in values are literally part of the values: 25 | :: SET FOO="bar" 26 | :: FOO is now five characters long: " b a r " 27 | :: If you don't want quotes, don't include them on the right-hand side. 28 | :: 29 | :: The CALL lines at the end of this file look redundant, but if you move them 30 | :: outside of the IF clauses, they do not run properly in the SET_SDK_64==Y 31 | :: case, I don't know why. 32 | @ECHO OFF 33 | 34 | SET COMMAND_TO_RUN=%* 35 | SET WIN_SDK_ROOT=C:\Program Files\Microsoft SDKs\Windows 36 | SET WIN_WDK=c:\Program Files (x86)\Windows Kits\10\Include\wdf 37 | 38 | :: Extract the major and minor versions, and allow for the minor version to be 39 | :: more than 9. This requires the version number to have two dots in it. 40 | SET MAJOR_PYTHON_VERSION=%PYTHON_VERSION:~0,1% 41 | IF "%PYTHON_VERSION:~3,1%" == "." ( 42 | SET MINOR_PYTHON_VERSION=%PYTHON_VERSION:~2,1% 43 | ) ELSE ( 44 | SET MINOR_PYTHON_VERSION=%PYTHON_VERSION:~2,2% 45 | ) 46 | 47 | :: Based on the Python version, determine what SDK version to use, and whether 48 | :: to set the SDK for 64-bit. 
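:: For example (illustrative values): PYTHON_VERSION=2.7.11 gives
:: MAJOR_PYTHON_VERSION=2 and MINOR_PYTHON_VERSION=7, selecting SDK v7.0 with
:: SET_SDK_64=Y, while PYTHON_VERSION=3.5.1 gives MAJOR_PYTHON_VERSION=3 and
:: MINOR_PYTHON_VERSION=5, selecting SDK v7.1 with SET_SDK_64=N.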
49 | IF %MAJOR_PYTHON_VERSION% == 2 ( 50 | SET WINDOWS_SDK_VERSION="v7.0" 51 | SET SET_SDK_64=Y 52 | ) ELSE ( 53 | IF %MAJOR_PYTHON_VERSION% == 3 ( 54 | SET WINDOWS_SDK_VERSION="v7.1" 55 | IF %MINOR_PYTHON_VERSION% LEQ 4 ( 56 | SET SET_SDK_64=Y 57 | ) ELSE ( 58 | SET SET_SDK_64=N 59 | IF EXIST "%WIN_WDK%" ( 60 | :: See: https://connect.microsoft.com/VisualStudio/feedback/details/1610302/ 61 | REN "%WIN_WDK%" 0wdf 62 | ) 63 | ) 64 | ) ELSE ( 65 | ECHO Unsupported Python version: "%MAJOR_PYTHON_VERSION%" 66 | EXIT 1 67 | ) 68 | ) 69 | 70 | IF %PYTHON_ARCH% == 64 ( 71 | IF %SET_SDK_64% == Y ( 72 | ECHO Configuring Windows SDK %WINDOWS_SDK_VERSION% for Python %MAJOR_PYTHON_VERSION% on a 64 bit architecture 73 | SET DISTUTILS_USE_SDK=1 74 | SET MSSdk=1 75 | "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Setup\WindowsSdkVer.exe" -q -version:%WINDOWS_SDK_VERSION% 76 | "%WIN_SDK_ROOT%\%WINDOWS_SDK_VERSION%\Bin\SetEnv.cmd" /x64 /release 77 | ECHO Executing: %COMMAND_TO_RUN% 78 | call %COMMAND_TO_RUN% || EXIT 1 79 | ) ELSE ( 80 | ECHO Using default MSVC build environment for 64 bit architecture 81 | ECHO Executing: %COMMAND_TO_RUN% 82 | call %COMMAND_TO_RUN% || EXIT 1 83 | ) 84 | ) ELSE ( 85 | ECHO Using default MSVC build environment for 32 bit architecture 86 | ECHO Executing: %COMMAND_TO_RUN% 87 | call %COMMAND_TO_RUN% || EXIT 1 88 | ) 89 | -------------------------------------------------------------------------------- /cobra/test/design.py: -------------------------------------------------------------------------------- 1 | from unittest import TestCase, TestLoader, TextTestRunner, skipIf 2 | 3 | import sys 4 | 5 | if __name__ == "__main__": 6 | sys.path.insert(0, "../..") 7 | from cobra.test import create_test_model, data_directory 8 | from cobra.design import * 9 | from cobra.design.design_algorithms import _add_decision_variable 10 | from cobra.solvers import get_solver_name 11 | sys.path.pop(0) 12 | else: 13 | from . 
import create_test_model, data_directory 14 | from ..design import * 15 | from ..design.design_algorithms import _add_decision_variable 16 | from ..solvers import get_solver_name 17 | 18 | try: 19 | solver = get_solver_name(mip=True) 20 | except: 21 | no_mip_solver = True 22 | else: 23 | no_mip_solver = False 24 | 25 | 26 | class TestDesignAlgorithms(TestCase): 27 | """Test functions in cobra.design""" 28 | 29 | def test_dual(self): 30 | model = create_test_model("textbook") 31 | self.assertAlmostEqual(model.optimize("maximize").f, 0.874, places=3) 32 | dual = dual_problem(model) 33 | self.assertAlmostEqual(dual.optimize("minimize").f, 0.874, places=3) 34 | 35 | def test_dual_integer_vars_as_lp(self): 36 | model = create_test_model("textbook") 37 | var = _add_decision_variable(model, "AKGDH") 38 | self.assertAlmostEqual(model.optimize("maximize").f, 0.874, places=3) 39 | # as lp: make integer continuous, set to 1 40 | dual = dual_problem(model, "maximize", [var.id], copy=True) 41 | r = dual.reactions.get_by_id(var.id) 42 | r.variable_kind = "continuous" 43 | r.lower_bound = r.upper_bound = 1 44 | self.assertAlmostEqual(dual.optimize("minimize").f, 0.874, places=3) 45 | r.lower_bound = r.upper_bound = 0 46 | self.assertAlmostEqual(dual.optimize("minimize").f, 0.858, places=3) 47 | 48 | @skipIf(no_mip_solver, "no MILP solver found") 49 | def test_dual_integer_vars_as_mip(self): 50 | # mip 51 | model = create_test_model("textbook") 52 | var = _add_decision_variable(model, "AKGDH") 53 | dual = dual_problem(model, "maximize", [var.id], copy=True) 54 | var_in_dual = dual.reactions.get_by_id(var.id) 55 | 56 | # minimization, so the optimal value state is to turn off AKGDH 57 | self.assertAlmostEqual(dual.optimize("minimize").f, 0.858, places=3) 58 | 59 | # turn off AKGDH in dual 60 | var_in_dual.lower_bound = var_in_dual.upper_bound = 1 61 | self.assertAlmostEqual(dual.optimize("minimize").f, 0.874, places=3) 62 | 63 | # turn on AKGDH in dual 64 | var_in_dual.lower_bound = var_in_dual.upper_bound = 0 65 | self.assertAlmostEqual(dual.optimize("minimize").f, 0.858, places=3) 66 | 67 | @skipIf(no_mip_solver, "no MILP solver found") 68 | def test_optknock(self): 69 | model = create_test_model("textbook") 70 | model.reactions.get_by_id("EX_o2_e").lower_bound = 0 71 | knockable_reactions = ["ACKr", "AKGDH", "ACALD", "LDH_D"] 72 | optknock_problem = set_up_optknock(model, "EX_lac__D_e", 73 | knockable_reactions, n_knockouts=2, 74 | copy=False) 75 | solution = run_optknock(optknock_problem, tolerance_integer=1e-9) 76 | self.assertIn("ACKr", solution.knockouts) 77 | self.assertIn("ACALD", solution.knockouts) 78 | self.assertAlmostEqual(solution.f, 17.891, places=3) 79 | 80 | # make a test suite to run all of the tests 81 | loader = TestLoader() 82 | suite = loader.loadTestsFromModule(sys.modules[__name__]) 83 | 84 | 85 | def test_all(): 86 | TextTestRunner(verbosity=2).run(suite) 87 | 88 | if __name__ == "__main__": 89 | test_all() 90 | -------------------------------------------------------------------------------- /cobra/flux_analysis/parsimonious.py: -------------------------------------------------------------------------------- 1 | from six import iteritems 2 | 3 | from ..manipulation.modify import convert_to_irreversible, revert_to_reversible 4 | from ..solvers import solver_dict, get_solver_name 5 | 6 | 7 | def optimize_minimal_flux(cobra_model, already_irreversible=False, 8 | fraction_of_optimum=1.0, solver=None, 9 | desired_objective_value=None, **optimize_kwargs): 10 | """Perform basic pFBA 
(parsimonius FBA) and minimize total flux. 11 | 12 | The function attempts to act as a drop-in replacement for optimize. It 13 | will make the reaction reversible and perform an optimization, then 14 | force the objective value to remain the same and minimize the total 15 | flux. Finally, it will convert the reaction back to the irreversible 16 | form it was in before. See http://dx.doi.org/10.1038/msb.2010.47 17 | 18 | cobra_model : :class:`~cobra.core.Model` object 19 | 20 | already_irreversible : bool, optional 21 | By default, the model is converted to an irreversible one. 22 | However, if the model is already irreversible, this step can be 23 | skipped 24 | 25 | fraction_of_optimum : float, optional 26 | Fraction of optimum which must be maintained. The original objective 27 | reaction is constrained to be greater than maximal_value * 28 | fraction_of_optimum. By default, this option is specified to be 1.0 29 | 30 | desired_objective_value : float, optional 31 | A desired objective value for the minimal solution that bypasses the 32 | initial optimization result. 33 | 34 | solver : string of solver name 35 | If None is given, the default solver will be used. 36 | 37 | Updates everything in-place, returns model to original state at end. 38 | """ 39 | 40 | if len(cobra_model.objective) > 1: 41 | raise ValueError('optimize_minimal_flux only supports models with' 42 | ' a single objective function') 43 | 44 | if 'objective_sense' in optimize_kwargs: 45 | if optimize_kwargs['objective_sense'] == 'minimize': 46 | raise ValueError( 47 | 'Minimization not supported in optimize_minimal_flux') 48 | optimize_kwargs.pop('objective_sense', None) 49 | 50 | # Convert to irreversible, so all reactions will have a positive flux 51 | convert_to_irreversible(cobra_model) 52 | 53 | solver = solver_dict[get_solver_name() if solver is None else solver] 54 | lp = solver.create_problem(cobra_model, **optimize_kwargs) 55 | if not desired_objective_value: 56 | solver.solve_problem(lp, objective_sense='maximize') 57 | status = solver.get_status(lp) 58 | if status != "optimal": 59 | raise ValueError( 60 | "pFBA requires optimal solution status, not {}".format(status)) 61 | desired_objective_value = solver.get_objective_value(lp) 62 | 63 | for i, reaction in enumerate(cobra_model.reactions): 64 | 65 | if reaction.objective_coefficient != 0: 66 | # Enforce a certain fraction of the original objective 67 | target = (desired_objective_value * fraction_of_optimum / 68 | reaction.objective_coefficient) 69 | solver.change_variable_bounds(lp, i, target, reaction.upper_bound) 70 | 71 | # Minimize all reaction fluxes (including objective?) 
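        # Because convert_to_irreversible() was applied above, every flux is
        # non-negative, so giving each variable an objective coefficient of 1
        # makes the LP objective the total (L1-norm) flux that pFBA minimizes.
        # The original objective reaction is included; it is held at or above
        # the required fraction of the optimum by the bound set in the branch
        # above, so it contributes an essentially fixed amount to the total.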
72 | solver.change_variable_objective(lp, i, 1) 73 | 74 | solver.solve_problem(lp, objective_sense='minimize', **optimize_kwargs) 75 | solution = solver.format_solution(lp, cobra_model) 76 | 77 | # Return the model to its original state 78 | cobra_model.solution = solution 79 | revert_to_reversible(cobra_model) 80 | 81 | if solution.status == "optimal": 82 | cobra_model.solution.f = sum([coeff * reaction.x for reaction, coeff in 83 | iteritems(cobra_model.objective)]) 84 | 85 | return solution 86 | -------------------------------------------------------------------------------- /cobra/test/data/invalid2.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/jython/scipy/sparse/sputils.py: -------------------------------------------------------------------------------- 1 | """ Utility functions for sparse matrix module 2 | """ 3 | 4 | __all__ = ['upcast','getdtype','isscalarlike','isintlike', 5 | 'isshape','issequence','isdense'] 6 | 7 | import numjy as np 8 | 9 | # keep this list syncronized with sparsetools 10 | #supported_dtypes = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 11 | # 'int64', 'uint64', 'float32', 'float64', 12 | # 'complex64', 'complex128'] 13 | supported_dtypes = ['int8','uint8','short','ushort','intc','uintc', 14 | 'longlong','ulonglong','single','double','longdouble', 15 | 'csingle','cdouble','clongdouble'] 16 | supported_dtypes = [ np.typeDict[x] for x in supported_dtypes] 17 | 18 | def upcast(*args): 19 | """Returns the nearest supported sparse dtype for the 20 | combination of one or more types. 21 | 22 | upcast(t0, t1, ..., tn) -> T where T is a supported dtype 23 | 24 | Examples 25 | -------- 26 | 27 | >>> upcast('int32') 28 | 29 | >>> upcast('bool') 30 | 31 | >>> upcast('int32','float32') 32 | 33 | >>> upcast('bool',complex,float) 34 | 35 | 36 | """ 37 | sample = np.array([0],dtype=args[0]) 38 | for t in args[1:]: 39 | sample = sample + np.array([0],dtype=t) 40 | 41 | upcast = sample.dtype 42 | 43 | for t in supported_dtypes: 44 | if np.can_cast(sample.dtype,t): 45 | return t 46 | 47 | raise TypeError,'no supported conversion for types: %s' % args 48 | 49 | 50 | def to_native(A): 51 | return np.asarray(A,dtype=A.dtype.newbyteorder('native')) 52 | 53 | 54 | def getdtype(dtype, a=None, default=None): 55 | """Function used to simplify argument processing. If 'dtype' is not 56 | specified (is None), returns a.dtype; otherwise returns a np.dtype 57 | object created from the specified dtype argument. If 'dtype' and 'a' 58 | are both None, construct a data type out of the 'default' parameter. 59 | Furthermore, 'dtype' must be in 'allowed' set. 60 | """ 61 | #TODO is this really what we want? 
62 | canCast = True 63 | if dtype is None: 64 | try: 65 | newdtype = a.dtype 66 | except AttributeError: 67 | if default is not None: 68 | newdtype = np.dtype(default) 69 | canCast = False 70 | else: 71 | raise TypeError, "could not interpret data type" 72 | else: 73 | newdtype = np.dtype(dtype) 74 | 75 | return newdtype 76 | 77 | def isscalarlike(x): 78 | """Is x either a scalar, an array scalar, or a 0-dim array?""" 79 | return np.isscalar(x) or (isdense(x) and x.ndim == 0) 80 | 81 | def isintlike(x): 82 | """Is x appropriate as an index into a sparse matrix? Returns True 83 | if it can be cast safely to a machine int. 84 | """ 85 | if issequence(x): 86 | return False 87 | else: 88 | try: 89 | if int(x) == x: 90 | return True 91 | else: 92 | return False 93 | except TypeError: 94 | return False 95 | 96 | def isshape(x): 97 | """Is x a valid 2-tuple of dimensions? 98 | """ 99 | try: 100 | # Assume it's a tuple of matrix dimensions (M, N) 101 | (M, N) = x 102 | except: 103 | return False 104 | else: 105 | if isintlike(M) and isintlike(N): 106 | if np.rank(M) == 0 and np.rank(N) == 0: 107 | return True 108 | return False 109 | 110 | 111 | def issequence(t): 112 | #Modded for numjy 113 | if isinstance(t, (list, tuple)): 114 | return True 115 | elif hassattr(t, '._M'): 116 | return (isinstance(t._M, np.ndarray) and (t.ndim == 1)) 117 | else: 118 | return False 119 | 120 | 121 | def _isinstance(x, _class): 122 | ## 123 | # This makes scipy.sparse.sparse.csc_matrix == __main__.csc_matrix. 124 | c1 = ('%s' % x.__class__).split( '.' ) 125 | c2 = ('%s' % _class).split( '.' ) 126 | aux = c1[-1] == c2[-1] 127 | return isinstance(x, _class) or aux 128 | 129 | def isdense(x): 130 | #Modded for numjy 131 | if hasattr(x,'_M'): 132 | return _isinstance(x._M, np.ndarray) 133 | raise Exception('The matrix must be created by numjy') 134 | -------------------------------------------------------------------------------- /cobra/test/data/textbook_fva.json: -------------------------------------------------------------------------------- 1 | {"ACALD": {"minimum": 0.0, "maximum": 0.0}, "ACALDt": {"minimum": 0.0, "maximum": -0.0}, "ACKr": {"minimum": 0.0, "maximum": -0.0}, "ACONTa": {"minimum": 6.00725, "maximum": 6.00725}, "ACONTb": {"minimum": 6.00725, "maximum": 6.00725}, "ACt2r": {"minimum": 0.0, "maximum": 0.0}, "ADK1": {"minimum": 0.0, "maximum": -0.0}, "AKGDH": {"minimum": 5.06438, "maximum": 5.06438}, "AKGt2r": {"minimum": 0.0, "maximum": 0.0}, "ALCD2x": {"minimum": 0.0, "maximum": 0.0}, "ATPM": {"minimum": 8.39, "maximum": 8.39}, "ATPS4r": {"minimum": 45.51401, "maximum": 45.51401}, "Biomass_Ecoli_core": {"minimum": 0.87392, "maximum": 0.87392}, "CO2t": {"minimum": -22.80983, "maximum": -22.80983}, "CS": {"minimum": 6.00725, "maximum": 6.00725}, "CYTBD": {"minimum": 43.59899, "maximum": 43.59899}, "D_LACt2": {"minimum": 0.0, "maximum": 0.0}, "ENO": {"minimum": 14.71614, "maximum": 14.71614}, "ETOHt2r": {"minimum": 0.0, "maximum": 0.0}, "EX_ac_e": {"minimum": 0.0, "maximum": -0.0}, "EX_acald_e": {"minimum": 0.0, "maximum": -0.0}, "EX_akg_e": {"minimum": 0.0, "maximum": -0.0}, "EX_co2_e": {"minimum": 22.80983, "maximum": 22.80983}, "EX_etoh_e": {"minimum": 0.0, "maximum": -0.0}, "EX_for_e": {"minimum": 0.0, "maximum": -0.0}, "EX_fru_e": {"minimum": 0.0, "maximum": -0.0}, "EX_fum_e": {"minimum": 0.0, "maximum": 0.0}, "EX_glc__D_e": {"minimum": -10.0, "maximum": -10.0}, "EX_gln__L_e": {"minimum": 0.0, "maximum": 0.0}, "EX_glu__L_e": {"minimum": 0.0, "maximum": -0.0}, "EX_h2o_e": {"minimum": 29.17583, 
"maximum": 29.17583}, "EX_h_e": {"minimum": 17.53087, "maximum": 17.53087}, "EX_lac__D_e": {"minimum": 0.0, "maximum": -0.0}, "EX_mal__L_e": {"minimum": 0.0, "maximum": -0.0}, "EX_nh4_e": {"minimum": -4.76532, "maximum": -4.76532}, "EX_o2_e": {"minimum": -21.79949, "maximum": -21.79949}, "EX_pi_e": {"minimum": -3.2149, "maximum": -3.2149}, "EX_pyr_e": {"minimum": 0.0, "maximum": -0.0}, "EX_succ_e": {"minimum": 0.0, "maximum": -0.0}, "FBA": {"minimum": 7.47738, "maximum": 7.47738}, "FBP": {"minimum": 0.0, "maximum": -0.0}, "FORt2": {"minimum": 0.0, "maximum": -0.0}, "FORti": {"minimum": 0.0, "maximum": -0.0}, "FRD7": {"minimum": 0.0, "maximum": 994.93562}, "FRUpts2": {"minimum": 0.0, "maximum": 0.0}, "FUM": {"minimum": 5.06438, "maximum": 5.06438}, "FUMt2_2": {"minimum": 0.0, "maximum": 0.0}, "G6PDH2r": {"minimum": 4.95998, "maximum": 4.95998}, "GAPD": {"minimum": 16.02353, "maximum": 16.02353}, "GLCpts": {"minimum": 10.0, "maximum": 10.0}, "GLNS": {"minimum": 0.22346, "maximum": 0.22346}, "GLNabc": {"minimum": 0.0, "maximum": 0.0}, "GLUDy": {"minimum": -4.54186, "maximum": -4.54186}, "GLUN": {"minimum": 0.0, "maximum": -0.0}, "GLUSy": {"minimum": 0.0, "maximum": -0.0}, "GLUt2r": {"minimum": 0.0, "maximum": -0.0}, "GND": {"minimum": 4.95998, "maximum": 4.95998}, "H2Ot": {"minimum": -29.17583, "maximum": -29.17583}, "ICDHyr": {"minimum": 6.00725, "maximum": 6.00725}, "ICL": {"minimum": 0.0, "maximum": -0.0}, "LDH_D": {"minimum": 0.0, "maximum": 0.0}, "MALS": {"minimum": 0.0, "maximum": -0.0}, "MALt2_2": {"minimum": 0.0, "maximum": -0.0}, "MDH": {"minimum": 5.06438, "maximum": 5.06438}, "ME1": {"minimum": 0.0, "maximum": -0.0}, "ME2": {"minimum": 0.0, "maximum": -0.0}, "NADH16": {"minimum": 38.53461, "maximum": 38.53461}, "NADTRHD": {"minimum": 0.0, "maximum": -0.0}, "NH4t": {"minimum": 4.76532, "maximum": 4.76532}, "O2t": {"minimum": 21.79949, "maximum": 21.79949}, "PDH": {"minimum": 9.28253, "maximum": 9.28253}, "PFK": {"minimum": 7.47738, "maximum": 7.47738}, "PFL": {"minimum": 0.0, "maximum": -0.0}, "PGI": {"minimum": 4.86086, "maximum": 4.86086}, "PGK": {"minimum": -16.02353, "maximum": -16.02353}, "PGL": {"minimum": 4.95998, "maximum": 4.95998}, "PGM": {"minimum": -14.71614, "maximum": -14.71614}, "PIt2r": {"minimum": 3.2149, "maximum": 3.2149}, "PPC": {"minimum": 2.50431, "maximum": 2.50431}, "PPCK": {"minimum": 0.0, "maximum": -0.0}, "PPS": {"minimum": 0.0, "maximum": -0.0}, "PTAr": {"minimum": -0.0, "maximum": -0.0}, "PYK": {"minimum": 1.75818, "maximum": 1.75818}, "PYRt2": {"minimum": 0.0, "maximum": -0.0}, "RPE": {"minimum": 2.67848, "maximum": 2.67848}, "RPI": {"minimum": -2.2815, "maximum": -2.2815}, "SUCCt2_2": {"minimum": 0.0, "maximum": -0.0}, "SUCCt3": {"minimum": 0.0, "maximum": -0.0}, "SUCDi": {"minimum": 5.06438, "maximum": 1000.0}, "SUCOAS": {"minimum": -5.06438, "maximum": -5.06438}, "TALA": {"minimum": 1.49698, "maximum": 1.49698}, "THD2": {"minimum": 0.0, "maximum": -0.0}, "TKT1": {"minimum": 1.49698, "maximum": 1.49698}, "TKT2": {"minimum": 1.1815, "maximum": 1.1815}, "TPI": {"minimum": 7.47738, "maximum": 7.47738}} -------------------------------------------------------------------------------- /cobra/solvers/__init__.py: -------------------------------------------------------------------------------- 1 | # Solvers are expected to follow the following interface 2 | # create_problem: makes a solver problem object from a cobra.model and 3 | # sets parameters (if possible) 4 | 5 | # format_solution: Returns a cobra.Solution object. 
This is where one 6 | # should dress the cobra.model with results if desired. 7 | 8 | # get_status: converts a solver specific status flag to a cobra pie flag. 9 | 10 | # set_parameter: takes solver specific parameter strings and sets them. 11 | 12 | # solve: solves the optimization problem. this is where one should put 13 | # in logic on what to try if the problem 14 | # isn't optimal 15 | 16 | # solve_problem: dumb and fast which will set parameters, if provided 17 | 18 | # update_problem: changes bounds and linear objective coefficient of the 19 | # solver specific problem file, given the complementary cobra.model 20 | 21 | # This attempts to import all working solvers in this directory 22 | 23 | from __future__ import absolute_import 24 | from warnings import warn 25 | from os import listdir, path 26 | 27 | solver_dict = {} 28 | possible_solvers = set() 29 | 30 | 31 | def add_solver(solver_name, use_name=None): 32 | """add a solver module to the solvers""" 33 | exec("from . import " + solver_name) 34 | solver = eval(solver_name) 35 | if use_name is None: 36 | if hasattr(solver, "solver_name"): 37 | use_name = solver.solver_name 38 | else: 39 | use_name = solver_name 40 | solver_dict[use_name] = eval(solver_name) 41 | 42 | for i in listdir(path.dirname(path.abspath(__file__))): 43 | if i.startswith("_") or i.startswith(".") or i.startswith('legacy'): 44 | continue 45 | if i.startswith("parameters"): 46 | continue 47 | if i.endswith(".py") or i.endswith(".so") or i.endswith(".pyc") \ 48 | or i.endswith(".pyd"): 49 | possible_solvers.add(i.split(".")[0]) 50 | 51 | if "wrappers" in possible_solvers: 52 | possible_solvers.remove("wrappers") 53 | 54 | for solver in possible_solvers: 55 | try: 56 | add_solver(solver) 57 | except: 58 | pass 59 | del solver 60 | 61 | if len(solver_dict) == 0: 62 | warn("No LP solvers found") 63 | 64 | # clean up the namespace 65 | del path, listdir, warn, i, possible_solvers 66 | 67 | 68 | class SolverNotFound(Exception): 69 | None 70 | 71 | 72 | def get_solver_name(mip=False, qp=False): 73 | """returns a solver name 74 | 75 | raises SolverNotFound if a suitable solver is not found 76 | """ 77 | if len(solver_dict) == 0: 78 | raise SolverNotFound("no solvers installed") 79 | # glpk only does lp, not qp. 
Gurobi and cplex are better at mip 80 | mip_order = ["gurobi", "cplex", "mosek", "coin", "cglpk", "glpk"] 81 | lp_order = ["cglpk", "cplex", "gurobi", "mosek", "coin", "glpk"] 82 | qp_order = ["gurobi", "cplex", "mosek"] 83 | 84 | if mip is False and qp is False: 85 | for solver_name in lp_order: 86 | if solver_name in solver_dict: 87 | return solver_name 88 | # none of them are in the list order - so return the first one 89 | return list(solver_dict)[0] 90 | elif qp: # mip does not yet matter for this determination 91 | for solver_name in qp_order: 92 | if solver_name in solver_dict: 93 | return solver_name 94 | # see if any solver defines set_quadratic_objective 95 | for solver_name in solver_dict: 96 | if hasattr(solver_dict[solver_name], "set_quadratic_objective"): 97 | return solver_name 98 | raise SolverNotFound("no qp-capable solver found") 99 | else: 100 | for solver_name in mip_order: 101 | if solver_name in solver_dict: 102 | return solver_name 103 | for solver_name in solver_dict: 104 | if hasattr(solver_dict[solver_name], "_SUPPORTS_MIP"): 105 | return solver_name 106 | raise SolverNotFound("no mip-capable solver found") 107 | 108 | 109 | def optimize(cobra_model, solver=None, **kwargs): 110 | """Wrapper to optimization solvers 111 | 112 | solver : str 113 | Name of the LP solver from solver_dict to use. If None is given, the 114 | default one will be used 115 | 116 | """ 117 | # If the default solver is not installed then use one of the others 118 | if solver is None: 119 | qp = "quadratic_component" in kwargs and \ 120 | kwargs["quadratic_component"] is not None 121 | solver = get_solver_name(qp=qp) 122 | 123 | return solver_dict[solver].solve(cobra_model, **kwargs) 124 | -------------------------------------------------------------------------------- /cobra/flux_analysis/variability.py: -------------------------------------------------------------------------------- 1 | from warnings import warn 2 | 3 | from six import iteritems 4 | from ..solvers import solver_dict, get_solver_name 5 | 6 | 7 | def flux_variability_analysis(cobra_model, reaction_list=None, 8 | fraction_of_optimum=1.0, solver=None, 9 | objective_sense="maximize", **solver_args): 10 | """Runs flux variability analysis to find max/min flux values 11 | 12 | cobra_model : :class:`~cobra.core.Model`: 13 | 14 | reaction_list : list of :class:`~cobra.core.Reaction`: or their id's 15 | The id's for which FVA should be run. If this is None, the bounds 16 | will be comptued for all reactions in the model. 17 | 18 | fraction_of_optimum : fraction of optimum which must be maintained. 19 | The original objective reaction is constrained to be greater than 20 | maximal_value * fraction_of_optimum 21 | 22 | solver : string of solver name 23 | If None is given, the default solver will be used. 24 | 25 | """ 26 | if reaction_list is None and "the_reactions" in solver_args: 27 | reaction_list = solver_args.pop("the_reactions") 28 | warn("the_reactions is deprecated. 
Please use reaction_list=") 29 | if reaction_list is None: 30 | reaction_list = cobra_model.reactions 31 | solver = solver_dict[get_solver_name() if solver is None else solver] 32 | lp = solver.create_problem(cobra_model) 33 | solver.solve_problem(lp, objective_sense=objective_sense) 34 | solution = solver.format_solution(lp, cobra_model) 35 | if solution.status != "optimal": 36 | raise ValueError("FVA requires the solution status to be optimal, " 37 | "not " + solution.status) 38 | # set all objective coefficients to 0 39 | for i, r in enumerate(cobra_model.reactions): 40 | if r.objective_coefficient != 0: 41 | f = solution.x_dict[r.id] 42 | new_bounds = (f * fraction_of_optimum, f) 43 | solver.change_variable_bounds(lp, i, 44 | min(new_bounds), max(new_bounds)) 45 | solver.change_variable_objective(lp, i, 0.) 46 | return calculate_lp_variability(lp, solver, cobra_model, reaction_list, 47 | **solver_args) 48 | 49 | 50 | def calculate_lp_variability(lp, solver, cobra_model, reaction_list, 51 | **solver_args): 52 | """calculate max and min of selected variables in an LP""" 53 | fva_results = {} 54 | for r in reaction_list: 55 | r_id = str(r) 56 | i = cobra_model.reactions.index(r_id) 57 | fva_results[r_id] = {} 58 | solver.change_variable_objective(lp, i, 1.) 59 | solver.solve_problem(lp, objective_sense="maximize", **solver_args) 60 | fva_results[r_id]["maximum"] = solver.get_objective_value(lp) 61 | solver.solve_problem(lp, objective_sense="minimize", **solver_args) 62 | fva_results[r_id]["minimum"] = solver.get_objective_value(lp) 63 | # revert the problem to how it was before 64 | solver.change_variable_objective(lp, i, 0.) 65 | return fva_results 66 | 67 | 68 | def find_blocked_reactions(cobra_model, reaction_list=None, 69 | solver=None, zero_cutoff=1e-9, 70 | open_exchanges=False, **solver_args): 71 | """Finds reactions that cannot carry a flux with the current 72 | exchange reaction settings for cobra_model, using flux variability 73 | analysis. 74 | 75 | """ 76 | if solver is None: 77 | solver = get_solver_name() 78 | if open_exchanges: 79 | # should not unnecessarily change model 80 | cobra_model = cobra_model.copy() 81 | for reaction in cobra_model.reactions: 82 | if reaction.boundary: 83 | reaction.lower_bound = min(reaction.lower_bound, -1000) 84 | reaction.upper_bound = max(reaction.upper_bound, 1000) 85 | if reaction_list is None: 86 | reaction_list = cobra_model.reactions 87 | # limit to reactions which are already 0. If the reactions alread 88 | # carry flux in this solution, then they can not be blocked. 89 | solution = solver_dict[solver].solve(cobra_model, **solver_args) 90 | reaction_list = [i for i in reaction_list 91 | if abs(solution.x_dict[i.id]) < zero_cutoff] 92 | # run fva to find reactions where both max and min are 0 93 | flux_span_dict = flux_variability_analysis( 94 | cobra_model, fraction_of_optimum=0., reaction_list=reaction_list, 95 | solver=solver, **solver_args) 96 | return [k for k, v in iteritems(flux_span_dict) 97 | if max(map(abs, v.values())) < zero_cutoff] 98 | -------------------------------------------------------------------------------- /INSTALL.md: -------------------------------------------------------------------------------- 1 | #Installation of cobrapy 2 | 3 | For installation help, please use the 4 | [Google Group](http://groups.google.com/group/cobra-pie). 5 | For usage instructions, please see the 6 | [documentation](https://cobrapy.readthedocs.org/en/latest/). 
7 | 8 | -------------------------------------------------------------------------------- 9 | 10 | All releases require Python 2.7+ or 3.4+ to be installed before proceeding. 11 | Mac OS X (10.7+) and Ubuntu ship with Python. Windows users without python 12 | can download and install python from the [python 13 | website](https://www.python.org/ftp/python/2.7.9/python-2.7.9.amd64.msi). 14 | Please note that though Anaconda and other python distributions may work with 15 | cobrapy, they are not explicitly supported (yet!). 16 | 17 | ## Stable version installation 18 | 19 | cobrapy can be installed with any recent installation of pip. Instructions 20 | for several operating systems are below: 21 | 22 | ### Mac OS X or Linux 23 | 0. [install pip](http://pip.readthedocs.org/en/latest/installing.html). 24 | 1. In a terminal, run ```sudo pip install cobra``` 25 | 26 | ### Microsoft Windows 27 | The preferred installation method on Windows is also to use pip. The latest 28 | Windows installers for Python 2.7 and 3.4 include pip, so if you use those you 29 | will already have pip. 30 | 31 | 1. In a terminal, run ```C:\Python27\Scripts\pip.exe install cobra``` 32 | (you may need to adjust the path accordingly). 33 | 34 | To install without pip, you will need to download and use the appropriate 35 | installer for your version of python from the [python package 36 | index](https://pypi.python.org/pypi/cobra/). 37 | 38 | 39 | ## Hacking version installation 40 | Use pip to install [Cython](http://cython.org/). Install libglpk 41 | using your package manger. This would be 42 | ```brew install homebrew/science/glpk``` on a Mac 43 | and ```sudo apt-get install libglpk-dev``` on debian-based systems 44 | (including Ubuntu and Mint). GLPK can also be compiled from the 45 | released source. 46 | 47 | Clone the git repository using your preferred mothod. Cloning from your 48 | own [github fork](https://help.github.com/articles/fork-a-repo) is recommended! 49 | Afterwards, open a terminal, enter the cobrapy repository and run the following 50 | command: 51 | 52 | python setup.py develop --user 53 | 54 | # Installation of optional dependencies 55 | ## Optional Dependencies 56 | On windows, these can downloaded from [this site] 57 | (http://www.lfd.uci.edu/~gohlke/pythonlibs/). On Mac/Linux, they can be 58 | installed using pip, or from the OS package manager (e.g brew, apt, yum). 59 | 60 | 1. [libsbml](http://sbml.org) >= 5.10 to read/write SBML level 2 files 61 | * [Windows installer](http://www.lfd.uci.edu/~gohlke/pythonlibs/#libsbml) 62 | * Use ```sudo pip install python-libsbml``` on Mac/Linux 63 | 2. [lxml](http://lxml.de/) to speed up read/write of SBML level 3 files. 64 | 3. [numpy](http://numpy.org) >= 1.6.1 for double deletions 65 | * [Windows installer](http://www.lfd.uci.edu/~gohlke/pythonlibs/#numpy) 66 | 4. [scipy](http://scipy.org) >= 0.11 for ArrayBasedModel and saving to *.mat files. 67 | * [Windows installer](http://www.lfd.uci.edu/~gohlke/pythonlibs/#scipy) 68 | 69 | ## Other solvers 70 | cobrapy comes with bindings to the GNU Linear Programming Kit ([glpk] 71 | (http://www.gnu.org/software/glpk/)) using its own bindings called "cglpk" in 72 | cobrapy. In addition, cobrapy currently supports these linear programming 73 | solvers: 74 | 75 | * ILOG/CPLEX (available with 76 | [Academic](https://www.ibm.com/developerworks/university/academicinitiative/) 77 | and 78 | [Commercial](http://www.ibm.com/software/integration/optimization/cplex-optimizer/) 79 | licenses). 
80 | * [gurobi](http://gurobi.com) 81 | * [QSopt_ex esolver](http://www.dii.uchile.cl/~daespino/ESolver_doc/main.html) 82 | * [MOSEK](http://www.mosek.com/) 83 | * [coin-or clp and cbc](http://coin-or.org/) through 84 | [cylp](https://github.com/coin-or/CyLP). 85 | 86 | ILOG/CPLEX, MOSEK, and Gurobi are commercial software packages that currently 87 | provide free licenses for academics and support both linear and quadratic 88 | programming. GLPK and clp are open source linear programming solvers; however, 89 | they may not be as robsut as the commercial solvers for mixed-integer and 90 | quadratic programming. QSopt_ex esolver is also open source, and can solve 91 | linear programs using rational operations, giving exact solutions. 92 | 93 | 94 | # Testing your installation 95 | 1. Start python 96 | 2. Type the following into the Python shell 97 | 98 | ```python 99 | from cobra.test import test_all 100 | test_all() 101 | ``` 102 | 103 | You should see some skipped tests and expected failures, and the function should return ```False```. 104 | 105 | -------------------------------------------------------------------------------- /cobra/solvers/coin.py: -------------------------------------------------------------------------------- 1 | from cylp.cy import CyClpSimplex 2 | from cylp.py.modeling.CyLPModel import CyLPArray 3 | from cylp.cy.CyCoinPackedMatrix import CyCoinPackedMatrix 4 | 5 | solver_name = "coin" 6 | _status_translation = {"primal infeasible": "infeasible", 7 | "solution": "optimal"} 8 | 9 | _SUPPORTS_MILP = True 10 | 11 | 12 | class Coin(CyClpSimplex): 13 | cbc = None 14 | 15 | @property 16 | def status_(self): 17 | return self.cbc.status if self.cbc else self.getStatusString() 18 | 19 | @property 20 | def primalVariableSolution_(self): 21 | return self.cbc.primalVariableSolution if self.cbc \ 22 | else self.primalVariableSolution 23 | 24 | @property 25 | def objectiveValue_(self): 26 | return self.cbc.objectiveValue if self.cbc else self.objectiveValue 27 | 28 | 29 | def create_problem(cobra_model, objective_sense="maximize", **kwargs): 30 | m = cobra_model.to_array_based_model(deepcopy_model=True) 31 | lp = Coin() 32 | v = lp.addVariable("v", len(m.reactions)) 33 | for i, rxn in enumerate(m.reactions): 34 | if rxn.variable_kind == "integer": 35 | lp.setInteger(v[i]) 36 | S = m.S 37 | v.lower = CyLPArray(m.lower_bounds) 38 | v.upper = CyLPArray(m.upper_bounds) 39 | inf = float("inf") 40 | cons = zip(m.b, m.constraint_sense) 41 | b_l = CyLPArray([-inf if s == "L" else b for b, s in cons]) 42 | b_u = CyLPArray([inf if s == "G" else b for b, s in cons]) 43 | lp.addConstraint(b_u >= S * v >= b_l, "b") 44 | lp.objectiveCoefficients = CyLPArray(m.objective_coefficients) 45 | set_parameter(lp, "objective_sense", objective_sense) 46 | set_parameter(lp, "tolerance_feasibility", 1e-9) 47 | lp.logLevel = 0 48 | for key, value in kwargs.items(): 49 | set_parameter(lp, key, value) 50 | return lp 51 | 52 | 53 | def solve(cobra_model, **kwargs): 54 | lp = create_problem(cobra_model) 55 | for key, value in kwargs.items(): 56 | set_parameter(lp, key, value) 57 | solve_problem(lp) 58 | return format_solution(lp, cobra_model) 59 | 60 | 61 | def set_parameter(lp, parameter_name, value): 62 | if parameter_name == "objective_sense": 63 | v = str(value).lower() 64 | if v == "maximize": 65 | lp.optimizationDirection = "max" 66 | elif v == "minimize": 67 | lp.optimizationDirection = "min" 68 | else: 69 | raise ValueError("unknown objective sense '%s'" % value) 70 | elif parameter_name == 
"tolerance_feasibility": 71 | lp.primalTolerance = value 72 | elif parameter_name == "verbose": 73 | lp.logLevel = value 74 | elif parameter_name == "quadratic_component": 75 | set_quadratic_objective(lp, value) 76 | else: 77 | setattr(lp, parameter_name, value) 78 | 79 | 80 | def solve_problem(lp, **kwargs): 81 | for key, value in kwargs.items(): 82 | set_parameter(lp, key, value) 83 | if max(lp.integerInformation): 84 | lp.cbc = lp.getCbcModel() 85 | lp.cbc.logLevel = lp.logLevel 86 | return lp.cbc.branchAndBound() 87 | else: 88 | lp.cbc = None 89 | return lp.primal() 90 | 91 | 92 | def format_solution(lp, cobra_model): 93 | Solution = cobra_model.solution.__class__ 94 | status = get_status(lp) 95 | if status != "optimal": # todo handle other possible 96 | return Solution(None, status=status) 97 | solution = Solution(lp.objectiveValue_, status=status) 98 | x = lp.primalVariableSolution_["v"].tolist() 99 | solution.x_dict = {r.id: x[i] for i, r in enumerate(cobra_model.reactions)} 100 | solution.x = x 101 | # TODO handle y 102 | 103 | return solution 104 | 105 | 106 | def get_status(lp): 107 | status = lp.status_ 108 | return _status_translation.get(status, status) 109 | 110 | 111 | def get_objective_value(lp): 112 | return lp.objectiveValue_ 113 | 114 | 115 | def change_variable_bounds(lp, index, lower_bound, upper_bound): 116 | lp.variablesLower[index] = lower_bound 117 | lp.variablesUpper[index] = upper_bound 118 | 119 | 120 | def change_coefficient(lp, met_index, rxn_index, value): 121 | S = lp.coefMatrix 122 | S[met_index, rxn_index] = value 123 | lp.coefMatrix = S 124 | 125 | 126 | def change_variable_objective(lp, index, value): 127 | lp.setObjectiveCoefficient(index, value) 128 | 129 | 130 | def _set_quadratic_objective(lp, quadratic_objective): 131 | """The quadratic routines in CLP do not yet work for GEMs""" 132 | if not hasattr(quadratic_objective, "tocoo"): 133 | raise Exception('quadratic component must have method tocoo') 134 | coo = quadratic_objective.tocoo() 135 | matrix = CyCoinPackedMatrix(True, coo.row, coo.col, coo.data) 136 | lp.loadQuadraticObjective(matrix) 137 | -------------------------------------------------------------------------------- /cobra/flux_analysis/moma.py: -------------------------------------------------------------------------------- 1 | from scipy.sparse import dok_matrix 2 | 3 | from ..solvers import get_solver_name, solver_dict 4 | 5 | 6 | def create_euclidian_moma_model(cobra_model, wt_model=None, **solver_args): 7 | # make the wild type copy if none was supplied 8 | if wt_model is None: 9 | wt_model = cobra_model.copy() 10 | else: 11 | wt_model = wt_model.copy() 12 | # ensure single objective 13 | wt_obj = wt_model.reactions.query(lambda x: x > 0, 14 | "objective_coefficient") 15 | if len(wt_obj) != 1: 16 | raise ValueError("wt_model must have exactly 1 objective, %d found" 17 | % len(wt_obj)) 18 | 19 | obj = cobra_model.reactions.query(lambda x: x > 0, "objective_coefficient") 20 | if len(obj) == 1: 21 | objective_id = obj[0].id 22 | else: 23 | raise ValueError("model must have exactly 1 objective, %d found" % 24 | len(obj)) 25 | 26 | wt_model.optimize(**solver_args) 27 | for reaction in wt_model.reactions: 28 | # we don't want delete_model_gene to remove the wt reaction as well 29 | reaction.gene_reaction_rule = '' 30 | if reaction.objective_coefficient != 0: 31 | reaction.objective_coefficient = 0 32 | reaction.upper_bound = reaction.lower_bound = reaction.x 33 | reaction.id = "MOMA_wt_" + reaction.id 34 | for metabolite in 
wt_model.metabolites: 35 | metabolite.id = "MOMA_wt_" + metabolite.id 36 | wt_model.repair() 37 | 38 | # make the moma model by combining both 39 | moma_model = cobra_model.copy() 40 | for reaction in moma_model.reactions: 41 | reaction.objective_coefficient = 0 42 | moma_model.add_reactions(wt_model.reactions) 43 | return moma_model, objective_id 44 | 45 | 46 | def create_euclidian_distance_objective(n_moma_reactions): 47 | """returns a matrix which will minimze the euclidian distance 48 | 49 | This matrix has the structure 50 | [ I -I] 51 | [-I I] 52 | where I is the identity matrix the same size as the number of 53 | reactions in the original model. 54 | 55 | n_moma_reactions: int 56 | This is the number of reactions in the MOMA model, which should 57 | be twice the number of reactions in the original model""" 58 | if n_moma_reactions % 2 != 0: 59 | raise ValueError("must be even") 60 | n_reactions = n_moma_reactions // 2 61 | Q = dok_matrix((n_reactions * 2, n_reactions * 2)) 62 | for i in range(2 * n_reactions): 63 | Q[i, i] = 1 64 | for i in range(n_reactions): 65 | Q[i, n_reactions + i] = -1 66 | Q[n_reactions + i, i] = -1 67 | return Q 68 | 69 | 70 | def create_euclidian_distance_lp(moma_model, solver): 71 | Q = create_euclidian_distance_objective(len(moma_model.reactions)) 72 | lp = solver.create_problem(moma_model, objective_sense="minimize", 73 | quadratic_component=Q) 74 | return lp 75 | 76 | 77 | def solve_moma_model(moma_model, objective_id, solver=None, **solver_args): 78 | solver = solver_dict[solver if solver and isinstance(solver, str) 79 | else get_solver_name(qp=True)] 80 | lp = create_euclidian_distance_lp(moma_model, solver=solver) 81 | solver.solve_problem(lp, **solver_args) 82 | solution = solver.format_solution(lp, moma_model) 83 | solution.f = 0. if solution.x_dict is None \ 84 | else solution.x_dict[objective_id] 85 | moma_model.solution = solution 86 | return solution 87 | 88 | 89 | def moma(wt_model, mutant_model, solver=None, **solver_args): 90 | if "norm_type" in solver_args: 91 | print("only euclidian norm type supported for moma") 92 | solver_args.pop("norm_type") 93 | moma_model, objective_id = create_euclidian_moma_model(mutant_model, 94 | wt_model) 95 | return solve_moma_model(moma_model, objective_id, 96 | solver=solver, **solver_args) 97 | 98 | 99 | def moma_knockout(moma_model, moma_objective, reaction_indexes, **moma_args): 100 | """computes result of reaction_knockouts using moma""" 101 | n = len(moma_model.reactions) // 2 102 | # knock out the reaction 103 | for i in reaction_indexes: 104 | mutant_reaction = moma_model.reactions[i] 105 | mutant_reaction.lower_bound, mutant_reaction.upper_bound = (0., 0.) 106 | result = solve_moma_model(moma_model, moma_objective, **moma_args) 107 | # reset the knockouts 108 | for i in reaction_indexes: 109 | mutant_reaction = moma_model.reactions[i] 110 | wt_reaction = moma_model.reactions[n + i] 111 | mutant_reaction.lower_bound = wt_reaction.lower_bound 112 | mutant_reaction.upper_bound = wt_reaction.upper_bound 113 | return result 114 | -------------------------------------------------------------------------------- /documentation_builder/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # cobra documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Jun 13 19:17:34 2012. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 
7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys 15 | import os 16 | 17 | # If extensions (or modules to document with autodoc) are in another directory, 18 | # add these directories to sys.path here. If the directory is relative to the 19 | # documentation root, use os.path.abspath to make it absolute, like shown here. 20 | sys.path.insert(0, os.path.abspath('..')) 21 | 22 | 23 | # In order to build documentation that requires libraries to import 24 | class Mock(object): 25 | def __init__(self, *args, **kwargs): 26 | return 27 | 28 | def __call__(self, *args, **kwargs): 29 | return Mock() 30 | 31 | @classmethod 32 | def __getattr__(cls, name): 33 | if name in ('__file__', '__path__'): 34 | return '/dev/null' 35 | else: 36 | return Mock() 37 | 38 | MOCK_MODULES = ['numpy', 'scipy', 'scipy.sparse', 'scipy.io', 'scipy.stats', 39 | 'glpk', 'gurobipy', 'gurobipy.GRB', 'cplex', 'pp', 'libsbml', 40 | 'cplex.exceptions', 'pandas'] 41 | for mod_name in MOCK_MODULES: 42 | sys.modules[mod_name] = Mock() 43 | 44 | # -- General configuration ---------------------------------------------------- 45 | 46 | # Add any Sphinx extension module names here, as strings. They can be 47 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 48 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 49 | 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 50 | 'sphinx.ext.napoleon', 'sphinx.ext.mathjax', 'nbsphinx'] 51 | 52 | # The master toctree document. 53 | master_doc = 'index' 54 | 55 | # General information about the project. 56 | project = u'cobra' 57 | copyright = u'2016, Daniel Robert Hyduke and Ali Ebrahim' 58 | 59 | # The version info for the project you're documenting, acts as replacement for 60 | # |version| and |release|, also used in various other places throughout the 61 | # built documents. 62 | # 63 | # The short X.Y version. 64 | from cobra.version import get_version, read_release_version 65 | version = read_release_version() 66 | # The full version, including alpha/beta/rc tags. 67 | release = get_version() 68 | 69 | # List of patterns, relative to source directory, that match files and 70 | # directories to ignore when looking for source files. 71 | exclude_patterns = ['_build', 'version.py', '.ipynb_checkpoints'] 72 | 73 | pygments_style = 'sphinx' 74 | 75 | 76 | # -- Options for HTML output -------------------------------------------------- 77 | 78 | mathjax_path = 'https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML' 79 | 80 | # -- Options for LaTeX output -------------------------------------------------- 81 | 82 | latex_elements = { 83 | # The paper size ('letterpaper' or 'a4paper'). 84 | 'papersize': 'letterpaper', 85 | 86 | # The font size ('10pt', '11pt' or '12pt'). 87 | #'pointsize': '10pt', 88 | 89 | # Additional stuff for the LaTeX preamble. 90 | 'preamble': r'\usepackage{amsmath,amssymb}', 91 | } 92 | 93 | # Grouping the document tree into LaTeX files. List of tuples 94 | # (source start file, target name, title, author, documentclass [howto/manual]). 95 | latex_documents = [ 96 | ('index', 'cobra.tex', u'cobra Documentation', 97 | u'Daniel Robert Hyduke and Ali Ebrahim', 'manual'), 98 | ] 99 | 100 | # -- Options for manual page output -------------------------------------------- 101 | 102 | # One entry per manual page. 
List of tuples 103 | # (source start file, name, description, authors, manual section). 104 | man_pages = [ 105 | ('index', 'cobra', u'cobra Documentation', 106 | [u'Daniel Robert Hyduke and Ali Ebrahim'], 1) 107 | ] 108 | 109 | # -- Options for Texinfo output ------------------------------------------------ 110 | 111 | # Grouping the document tree into Texinfo files. List of tuples 112 | # (source start file, target name, title, author, 113 | # dir menu entry, description, category) 114 | texinfo_documents = [ 115 | ('index', 'cobra', u'cobra Documentation', 116 | u'Daniel Robert Hyduke and Ali Ebrahim', 'cobra', 117 | 'A package for constraints-based modeling of biological networks', 118 | 'Miscellaneous'), 119 | ] 120 | 121 | # Example configuration for intersphinx: refer to the Python standard library. 122 | intersphinx_mapping = {"http://docs.python.org/": None, 123 | "http://docs.scipy.org/doc/numpy/": None, 124 | "http://docs.scipy.org/doc/scipy/reference": None} 125 | intersphinx_cache_limit = 10 # days to keep the cached inventories 126 | -------------------------------------------------------------------------------- /cobra/version.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Get version identification from git 3 | 4 | See the documentation of get_version for more information 5 | 6 | """ 7 | from __future__ import print_function 8 | 9 | from subprocess import check_output, CalledProcessError 10 | from os import path, name, devnull, environ, listdir 11 | 12 | __all__ = ("get_version",) 13 | 14 | CURRENT_DIRECTORY = path.dirname(path.abspath(__file__)) 15 | VERSION_FILE = path.join(CURRENT_DIRECTORY, "VERSION") 16 | 17 | GIT_COMMAND = "git" 18 | 19 | if name == "nt": 20 | def find_git_on_windows(): 21 | """find the path to the git executable on windows""" 22 | # first see if git is in the path 23 | try: 24 | check_output(["where", "/Q", "git"]) 25 | # if this command succeeded, git is in the path 26 | return "git" 27 | # catch the exception thrown if git was not found 28 | except CalledProcessError: 29 | pass 30 | # There are several locations git.exe may be hiding 31 | possible_locations = [] 32 | # look in program files for msysgit 33 | if "PROGRAMFILES(X86)" in environ: 34 | possible_locations.append("%s/Git/cmd/git.exe" % 35 | environ["PROGRAMFILES(X86)"]) 36 | if "PROGRAMFILES" in environ: 37 | possible_locations.append("%s/Git/cmd/git.exe" % 38 | environ["PROGRAMFILES"]) 39 | # look for the github version of git 40 | if "LOCALAPPDATA" in environ: 41 | github_dir = "%s/GitHub" % environ["LOCALAPPDATA"] 42 | if path.isdir(github_dir): 43 | for subdir in listdir(github_dir): 44 | if not subdir.startswith("PortableGit"): 45 | continue 46 | possible_locations.append("%s/%s/bin/git.exe" % 47 | (github_dir, subdir)) 48 | for possible_location in possible_locations: 49 | if path.isfile(possible_location): 50 | return possible_location 51 | # git was not found 52 | return "git" 53 | 54 | GIT_COMMAND = find_git_on_windows() 55 | 56 | 57 | def call_git_describe(abbrev=7): 58 | """return the string output of git desribe""" 59 | try: 60 | with open(devnull, "w") as fnull: 61 | arguments = [GIT_COMMAND, "describe", "--tags", 62 | "--abbrev=%d" % abbrev] 63 | return check_output(arguments, cwd=CURRENT_DIRECTORY, 64 | stderr=fnull).decode("ascii").strip() 65 | except (OSError, CalledProcessError): 66 | return None 67 | 68 | 69 | def format_git_describe(git_str, pep440=False): 70 | """format the result of calling 'git 
describe' as a python version""" 71 | if git_str is None: 72 | return None 73 | if "-" not in git_str: # currently at a tag 74 | return git_str 75 | else: 76 | # formatted as version-N-githash 77 | # want to convert to version.postN-githash 78 | git_str = git_str.replace("-", ".post", 1) 79 | if pep440: # does not allow git hash afterwards 80 | return git_str.split("-")[0] 81 | else: 82 | return git_str.replace("-g", "+git") 83 | 84 | 85 | def read_release_version(): 86 | """Read version information from VERSION file""" 87 | try: 88 | with open(VERSION_FILE, "r") as infile: 89 | version = str(infile.read().strip()) 90 | if len(version) == 0: 91 | version = None 92 | return version 93 | except IOError: 94 | return None 95 | 96 | 97 | def update_release_version(): 98 | """Update VERSION file""" 99 | version = get_version(pep440=True) 100 | with open(VERSION_FILE, "w") as outfile: 101 | outfile.write(version) 102 | outfile.write("\n") 103 | 104 | 105 | def get_version(pep440=False): 106 | """Tracks the version number. 107 | 108 | pep440: bool 109 | When True, this function returns a version string suitable for 110 | a release as defined by PEP 440. When False, the githash (if 111 | available) will be appended to the version string. 112 | 113 | The file VERSION holds the version information. If this is not a git 114 | repository, then it is reasonable to assume that the version is not 115 | being incremented and the version returned will be the release version as 116 | read from the file. 117 | 118 | However, if the script is located within an active git repository, 119 | git-describe is used to get the version information. 120 | 121 | The file VERSION will need to be changed by manually. This should be done 122 | before running git tag (set to the same as the version in the tag). 123 | 124 | """ 125 | 126 | git_version = format_git_describe(call_git_describe(), pep440=pep440) 127 | if git_version is None: # not a git repository 128 | return read_release_version() 129 | return git_version 130 | 131 | 132 | if __name__ == "__main__": 133 | print(get_version()) 134 | -------------------------------------------------------------------------------- /documentation_builder/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=..\documentation 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. 
gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. linkcheck to check all external links for integrity 37 | echo. doctest to run all doctests embedded in the documentation if enabled 38 | goto end 39 | ) 40 | 41 | if "%1" == "clean" ( 42 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 43 | del /q /s %BUILDDIR%\* 44 | goto end 45 | ) 46 | 47 | if "%1" == "html" ( 48 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 49 | if errorlevel 1 exit /b 1 50 | echo. 51 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 52 | goto end 53 | ) 54 | 55 | if "%1" == "dirhtml" ( 56 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 57 | if errorlevel 1 exit /b 1 58 | echo. 59 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 60 | goto end 61 | ) 62 | 63 | if "%1" == "singlehtml" ( 64 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 65 | if errorlevel 1 exit /b 1 66 | echo. 67 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 68 | goto end 69 | ) 70 | 71 | if "%1" == "pickle" ( 72 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 73 | if errorlevel 1 exit /b 1 74 | echo. 75 | echo.Build finished; now you can process the pickle files. 76 | goto end 77 | ) 78 | 79 | if "%1" == "json" ( 80 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 81 | if errorlevel 1 exit /b 1 82 | echo. 83 | echo.Build finished; now you can process the JSON files. 84 | goto end 85 | ) 86 | 87 | if "%1" == "htmlhelp" ( 88 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 89 | if errorlevel 1 exit /b 1 90 | echo. 91 | echo.Build finished; now you can run HTML Help Workshop with the ^ 92 | .hhp project file in %BUILDDIR%/htmlhelp. 93 | goto end 94 | ) 95 | 96 | if "%1" == "qthelp" ( 97 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 98 | if errorlevel 1 exit /b 1 99 | echo. 100 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 101 | .qhcp project file in %BUILDDIR%/qthelp, like this: 102 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\cobra.qhcp 103 | echo.To view the help file: 104 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\cobra.ghc 105 | goto end 106 | ) 107 | 108 | if "%1" == "devhelp" ( 109 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 110 | if errorlevel 1 exit /b 1 111 | echo. 112 | echo.Build finished. 113 | goto end 114 | ) 115 | 116 | if "%1" == "epub" ( 117 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 118 | if errorlevel 1 exit /b 1 119 | echo. 120 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 121 | goto end 122 | ) 123 | 124 | if "%1" == "latex" ( 125 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 129 | goto end 130 | ) 131 | 132 | if "%1" == "text" ( 133 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 134 | if errorlevel 1 exit /b 1 135 | echo. 136 | echo.Build finished. The text files are in %BUILDDIR%/text. 137 | goto end 138 | ) 139 | 140 | if "%1" == "man" ( 141 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 142 | if errorlevel 1 exit /b 1 143 | echo. 144 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 145 | goto end 146 | ) 147 | 148 | if "%1" == "texinfo" ( 149 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 150 | if errorlevel 1 exit /b 1 151 | echo. 152 | echo.Build finished. 
The Texinfo files are in %BUILDDIR%/texinfo. 153 | goto end 154 | ) 155 | 156 | if "%1" == "gettext" ( 157 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 158 | if errorlevel 1 exit /b 1 159 | echo. 160 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 161 | goto end 162 | ) 163 | 164 | if "%1" == "changes" ( 165 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 166 | if errorlevel 1 exit /b 1 167 | echo. 168 | echo.The overview file is in %BUILDDIR%/changes. 169 | goto end 170 | ) 171 | 172 | if "%1" == "linkcheck" ( 173 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 174 | if errorlevel 1 exit /b 1 175 | echo. 176 | echo.Link check complete; look for any errors in the above output ^ 177 | or in %BUILDDIR%/linkcheck/output.txt. 178 | goto end 179 | ) 180 | 181 | if "%1" == "doctest" ( 182 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 183 | if errorlevel 1 exit /b 1 184 | echo. 185 | echo.Testing of doctests in the sources finished, look at the ^ 186 | results in %BUILDDIR%/doctest/output.txt. 187 | goto end 188 | ) 189 | 190 | :end 191 | -------------------------------------------------------------------------------- /cobra/core/Formula.py: -------------------------------------------------------------------------------- 1 | import re 2 | from warnings import warn 3 | 4 | from .Object import Object 5 | 6 | # Numbers are not required because of the |(?=[A-Z])? block. See the 7 | # discussion in https://github.com/opencobra/cobrapy/issues/128 for 8 | # more details. 9 | element_re = re.compile("([A-Z][a-z]?)([0-9.]+[0-9.]?|(?=[A-Z])?)") 10 | 11 | 12 | class Formula(Object): 13 | """Describes a Chemical Formula 14 | 15 | A legal formula string contains only letters and numbers. 16 | 17 | """ 18 | def __init__(self, formula=None): 19 | Object.__init__(self, formula) 20 | self.formula = formula 21 | self.elements = {} 22 | if self.formula is not None: 23 | self.parse_composition() 24 | 25 | def __add__(self, other_formula): 26 | """Combine two molecular formulas. 27 | 28 | other_formula: cobra.Formula or str of a chemical Formula. 
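Note: addition simply concatenates the two formula strings (see the return statement below), so, for example, Formula('H2O') + Formula('CO2') yields a Formula whose formula string is 'H2OCO2'.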
29 | 30 | """ 31 | return Formula(self.formula + other_formula.formula) 32 | 33 | def parse_composition(self): 34 | """Breaks the chemical formula down by element.""" 35 | tmp_formula = self.formula 36 | # commonly occuring characters in incorrectly constructed formulas 37 | if "*" in tmp_formula: 38 | warn("invalid character '*' found in formula '%s'" % self.formula) 39 | tmp_formula = self.formula.replace("*", "") 40 | if "(" in tmp_formula or ")" in tmp_formula: 41 | warn("parenthesis found in formula '%s'" % self.formula) 42 | return 43 | composition = {} 44 | parsed = element_re.findall(tmp_formula) 45 | for (element, count) in parsed: 46 | if count == '': 47 | count = 1 48 | else: 49 | try: 50 | count = float(count) 51 | int_count = int(count) 52 | if count == int_count: 53 | count = int_count 54 | else: 55 | warn("%s is not an integer (in formula %s)" % 56 | (count, self.formula)) 57 | except ValueError: 58 | warn("failed to parse %s (in formula %s)" % 59 | (count, self.formula)) 60 | self.elements = {} 61 | return 62 | if element in composition: 63 | composition[element] += count 64 | else: 65 | composition[element] = count 66 | self.elements = composition 67 | 68 | @property 69 | def weight(self): 70 | """Calculate the formula weight""" 71 | try: 72 | return sum([count * elements_and_molecular_weights[element] 73 | for element, count in self.elements.items()]) 74 | except KeyError as e: 75 | warn("The element %s does not appear in the peridic table" % e) 76 | 77 | 78 | elements_and_molecular_weights = { 79 | 'H': 1.007940, 80 | 'He': 4.002602, 81 | 'Li': 6.941000, 82 | 'Be': 9.012182, 83 | 'B': 10.811000, 84 | 'C': 12.010700, 85 | 'N': 14.006700, 86 | 'O': 15.999400, 87 | 'F': 18.998403, 88 | 'Ne': 20.179700, 89 | 'Na': 22.989770, 90 | 'Mg': 24.305000, 91 | 'Al': 26.981538, 92 | 'Si': 28.085500, 93 | 'P': 30.973761, 94 | 'S': 32.065000, 95 | 'Cl': 35.453000, 96 | 'Ar': 39.948000, 97 | 'K': 39.098300, 98 | 'Ca': 40.078000, 99 | 'Sc': 44.955910, 100 | 'Ti': 47.867000, 101 | 'V': 50.941500, 102 | 'Cr': 51.996100, 103 | 'Mn': 54.938049, 104 | 'Fe': 55.845000, 105 | 'Co': 58.933200, 106 | 'Ni': 58.693400, 107 | 'Cu': 63.546000, 108 | 'Zn': 65.409000, 109 | 'Ga': 69.723000, 110 | 'Ge': 72.640000, 111 | 'As': 74.921600, 112 | 'Se': 78.960000, 113 | 'Br': 79.904000, 114 | 'Kr': 83.798000, 115 | 'Rb': 85.467800, 116 | 'Sr': 87.620000, 117 | 'Y': 88.905850, 118 | 'Zr': 91.224000, 119 | 'Nb': 92.906380, 120 | 'Mo': 95.940000, 121 | 'Tc': 98.000000, 122 | 'Ru': 101.070000, 123 | 'Rh': 102.905500, 124 | 'Pd': 106.420000, 125 | 'Ag': 107.868200, 126 | 'Cd': 112.411000, 127 | 'In': 114.818000, 128 | 'Sn': 118.710000, 129 | 'Sb': 121.760000, 130 | 'Te': 127.600000, 131 | 'I': 126.904470, 132 | 'Xe': 131.293000, 133 | 'Cs': 132.905450, 134 | 'Ba': 137.327000, 135 | 'La': 138.905500, 136 | 'Ce': 140.116000, 137 | 'Pr': 140.907650, 138 | 'Nd': 144.240000, 139 | 'Pm': 145.000000, 140 | 'Sm': 150.360000, 141 | 'Eu': 151.964000, 142 | 'Gd': 157.250000, 143 | 'Tb': 158.925340, 144 | 'Dy': 162.500000, 145 | 'Ho': 164.930320, 146 | 'Er': 167.259000, 147 | 'Tm': 168.934210, 148 | 'Yb': 173.040000, 149 | 'Lu': 174.967000, 150 | 'Hf': 178.490000, 151 | 'Ta': 180.947900, 152 | 'W': 183.840000, 153 | 'Re': 186.207000, 154 | 'Os': 190.230000, 155 | 'Ir': 192.217000, 156 | 'Pt': 195.078000, 157 | 'Au': 196.966550, 158 | 'Hg': 200.590000, 159 | 'Tl': 204.383300, 160 | 'Pb': 207.200000, 161 | 'Bi': 208.980380, 162 | 'Po': 209.000000, 163 | 'At': 210.000000, 164 | 'Rn': 222.000000, 165 | 'Fr': 223.000000, 166 | 
'Ra': 226.000000, 167 | 'Ac': 227.000000, 168 | 'Th': 232.038100, 169 | 'Pa': 231.035880, 170 | 'U': 238.028910, 171 | 'Np': 237.000000, 172 | 'Pu': 244.000000, 173 | 'Am': 243.000000, 174 | 'Cm': 247.000000, 175 | 'Bk': 247.000000, 176 | 'Cf': 251.000000, 177 | 'Es': 252.000000, 178 | 'Fm': 257.000000, 179 | 'Md': 258.000000, 180 | 'No': 259.000000, 181 | 'Lr': 262.000000, 182 | 'Rf': 261.000000, 183 | 'Db': 262.000000, 184 | 'Sg': 266.000000, 185 | 'Bh': 264.000000, 186 | 'Hs': 277.000000, 187 | 'Mt': 268.000000, 188 | 'Ds': 281.000000, 189 | 'Rg': 272.000000, 190 | 'Cn': 285.000000, 191 | 'Uuq': 289.000000, 192 | 'Uuh': 292.000000 193 | } 194 | -------------------------------------------------------------------------------- /documentation_builder/gapfilling.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Gapfilling\n", 8 | "\n", 9 | "GrowMatch and SMILEY are gap-filling algorithms, which try to make the minimal number of changes to a model that allow it to simulate growth. For more information, see [Kumar et al.](http://dx.doi.org/10.1371/journal.pcbi.1000308). Please note that these algorithms are Mixed-Integer Linear Programs, which need solvers such as gurobi or cplex to function correctly." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": { 16 | "collapsed": false 17 | }, 18 | "outputs": [], 19 | "source": [ 20 | "import cobra.test\n", 21 | "\n", 22 | "model = cobra.test.create_test_model(\"salmonella\")" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "In this model D-Fructose-6-phosphate is an essential metabolite. We will remove all the reactions using it, and add them to a separate model." 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 2, 35 | "metadata": { 36 | "collapsed": true 37 | }, 38 | "outputs": [], 39 | "source": [ 40 | "# remove some reactions and add them to the universal reactions\n", 41 | "Universal = cobra.Model(\"Universal_Reactions\")\n", 42 | "for i in [i.id for i in model.metabolites.f6p_c.reactions]:\n", 43 | " reaction = model.reactions.get_by_id(i)\n", 44 | " Universal.add_reaction(reaction.copy())\n", 45 | " reaction.remove_from_model()" 46 | ] 47 | }, 48 | { 49 | "cell_type": "markdown", 50 | "metadata": {}, 51 | "source": [ 52 | "Now, because of these gaps, the model won't grow." 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 3, 58 | "metadata": { 59 | "collapsed": false 60 | }, 61 | "outputs": [ 62 | { 63 | "data": { 64 | "text/plain": [ 65 | "2.821531499799383e-12" 66 | ] 67 | }, 68 | "execution_count": 3, 69 | "metadata": {}, 70 | "output_type": "execute_result" 71 | } 72 | ], 73 | "source": [ 74 | "model.optimize().f" 75 | ] 76 | }, 77 | { 78 | "cell_type": "markdown", 79 | "metadata": {}, 80 | "source": [ 81 | "## GrowMatch\n", 82 | "\n", 83 | "We will use GrowMatch to add back the minimal number of reactions from this set of \"universal\" reactions (in this case just the ones we removed) to allow it to grow."
84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": 4, 89 | "metadata": { 90 | "collapsed": false 91 | }, 92 | "outputs": [ 93 | { 94 | "name": "stdout", 95 | "output_type": "stream", 96 | "text": [ 97 | "GF6PTA\n", 98 | "FBP\n", 99 | "MAN6PI_reverse\n", 100 | "TKT2_reverse\n", 101 | "PGI_reverse\n" 102 | ] 103 | } 104 | ], 105 | "source": [ 106 | "r = cobra.flux_analysis.growMatch(model, Universal)\n", 107 | "for e in r[0]:\n", 108 | " print(e.id)" 109 | ] 110 | }, 111 | { 112 | "cell_type": "markdown", 113 | "metadata": {}, 114 | "source": [ 115 | "We can obtain multiple possible reaction sets by having the algorithm go through multiple iterations." 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": 5, 121 | "metadata": { 122 | "collapsed": false 123 | }, 124 | "outputs": [ 125 | { 126 | "name": "stdout", 127 | "output_type": "stream", 128 | "text": [ 129 | "---- Run 1 ----\n", 130 | "GF6PTA\n", 131 | "FBP\n", 132 | "MAN6PI_reverse\n", 133 | "TKT2_reverse\n", 134 | "PGI_reverse\n", 135 | "---- Run 2 ----\n", 136 | "F6PP\n", 137 | "GF6PTA\n", 138 | "TALA\n", 139 | "MAN6PI_reverse\n", 140 | "F6PA_reverse\n", 141 | "---- Run 3 ----\n", 142 | "GF6PTA\n", 143 | "MAN6PI_reverse\n", 144 | "TKT2_reverse\n", 145 | "F6PA_reverse\n", 146 | "PGI_reverse\n", 147 | "---- Run 4 ----\n", 148 | "F6PP\n", 149 | "GF6PTA\n", 150 | "FBP\n", 151 | "TALA\n", 152 | "MAN6PI_reverse\n" 153 | ] 154 | } 155 | ], 156 | "source": [ 157 | "result = cobra.flux_analysis.growMatch(model, Universal,\n", 158 | " iterations=4)\n", 159 | "for i, entries in enumerate(result):\n", 160 | " print(\"---- Run %d ----\" % (i + 1))\n", 161 | " for e in entries:\n", 162 | " print(e.id)" 163 | ] 164 | }, 165 | { 166 | "cell_type": "markdown", 167 | "metadata": {}, 168 | "source": [ 169 | "## SMILEY\n", 170 | "\n", 171 | "SMILEY is very similar to growMatch, only instead of setting growth as the objective, it sets production of a specific metabolite" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": 6, 177 | "metadata": { 178 | "collapsed": false 179 | }, 180 | "outputs": [ 181 | { 182 | "name": "stdout", 183 | "output_type": "stream", 184 | "text": [ 185 | "GF6PTA\n", 186 | "MAN6PI_reverse\n", 187 | "TKT2_reverse\n", 188 | "F6PA_reverse\n", 189 | "PGI_reverse\n" 190 | ] 191 | } 192 | ], 193 | "source": [ 194 | "r = cobra.flux_analysis.gapfilling.SMILEY(model, \"ac_e\",\n", 195 | " Universal)\n", 196 | "for e in r[0]:\n", 197 | " print(e.id)" 198 | ] 199 | } 200 | ], 201 | "metadata": { 202 | "kernelspec": { 203 | "display_name": "Python 3", 204 | "language": "python", 205 | "name": "python3" 206 | }, 207 | "language_info": { 208 | "codemirror_mode": { 209 | "name": "ipython", 210 | "version": 3 211 | }, 212 | "file_extension": ".py", 213 | "mimetype": "text/x-python", 214 | "name": "python", 215 | "nbconvert_exporter": "python", 216 | "pygments_lexer": "ipython3", 217 | "version": "3.4.3" 218 | } 219 | }, 220 | "nbformat": 4, 221 | "nbformat_minor": 0 222 | } 223 | -------------------------------------------------------------------------------- /documentation_builder/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = ../documentation 9 | 10 | # Internal variables. 
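# PAPEROPT_a4/PAPEROPT_letter below translate the PAPER variable into the -D latex_paper_size option that ALLSPHINXOPTS passes to sphinx-build.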
11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 16 | 17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 18 | 19 | help: 20 | @echo "Please use \`make ' where is one of" 21 | @echo " html to make standalone HTML files" 22 | @echo " dirhtml to make HTML files named index.html in directories" 23 | @echo " singlehtml to make a single large HTML file" 24 | @echo " pickle to make pickle files" 25 | @echo " json to make JSON files" 26 | @echo " htmlhelp to make HTML files and a HTML help project" 27 | @echo " qthelp to make HTML files and a qthelp project" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 31 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 32 | @echo " text to make text files" 33 | @echo " man to make manual pages" 34 | @echo " texinfo to make Texinfo files" 35 | @echo " info to make Texinfo files and run them through makeinfo" 36 | @echo " gettext to make PO message catalogs" 37 | @echo " changes to make an overview of all changed/added/deprecated items" 38 | @echo " linkcheck to check all external links for integrity" 39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 40 | 41 | clean: 42 | -rm -rf $(BUILDDIR)/* 43 | 44 | html: 45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 48 | 49 | dirhtml: 50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 51 | @echo 52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 53 | 54 | singlehtml: 55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 56 | @echo 57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 58 | 59 | pickle: 60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 61 | @echo 62 | @echo "Build finished; now you can process the pickle files." 63 | 64 | json: 65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 66 | @echo 67 | @echo "Build finished; now you can process the JSON files." 68 | 69 | htmlhelp: 70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 71 | @echo 72 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 73 | ".hhp project file in $(BUILDDIR)/htmlhelp." 74 | 75 | qthelp: 76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 77 | @echo 78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cobra.qhcp" 81 | @echo "To view the help file:" 82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cobra.qhc" 83 | 84 | devhelp: 85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 86 | @echo 87 | @echo "Build finished." 
88 | @echo "To view the help file:" 89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/cobra" 90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/cobra" 91 | @echo "# devhelp" 92 | 93 | epub: 94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 95 | @echo 96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 97 | 98 | latex: 99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 100 | @echo 101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 103 | "(use \`make latexpdf' here to do that automatically)." 104 | 105 | latexpdf: 106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 107 | @echo "Running LaTeX files through pdflatex..." 108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 110 | 111 | text: 112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 113 | @echo 114 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 115 | 116 | man: 117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 118 | @echo 119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 120 | 121 | texinfo: 122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 123 | @echo 124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 125 | @echo "Run \`make' in that directory to run these through makeinfo" \ 126 | "(use \`make info' here to do that automatically)." 127 | 128 | info: 129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 130 | @echo "Running Texinfo files through makeinfo..." 131 | make -C $(BUILDDIR)/texinfo info 132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 133 | 134 | gettext: 135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 136 | @echo 137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 138 | 139 | changes: 140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 141 | @echo 142 | @echo "The overview file is in $(BUILDDIR)/changes." 143 | 144 | linkcheck: 145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 146 | @echo 147 | @echo "Link check complete; look for any errors in the above output " \ 148 | "or in $(BUILDDIR)/linkcheck/output.txt." 149 | 150 | doctest: 151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 152 | @echo "Testing of doctests in the sources finished, look at the " \ 153 | "results in $(BUILDDIR)/doctest/output.txt." 154 | -------------------------------------------------------------------------------- /documentation_builder/pymatbridge.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": false 7 | }, 8 | "source": [ 9 | "# Using the COBRA toolbox with cobrapy\n", 10 | "\n", 11 | "This example demonstrates using COBRA toolbox commands in MATLAB from python through [pymatbridge](http://arokem.github.io/python-matlab-bridge/)." 
12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 1, 17 | "metadata": { 18 | "collapsed": false 19 | }, 20 | "outputs": [ 21 | { 22 | "name": "stdout", 23 | "output_type": "stream", 24 | "text": [ 25 | "Starting MATLAB on ZMQ socket ipc:///tmp/pymatbridge-57ff5429-02d9-4e1a-8ed0-44e391fb0df7\n", 26 | "Send 'exit' command to kill the server\n", 27 | "....MATLAB started and connected!\n" 28 | ] 29 | } 30 | ], 31 | "source": [ 32 | "%load_ext pymatbridge" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 2, 38 | "metadata": { 39 | "collapsed": true 40 | }, 41 | "outputs": [], 42 | "source": [ 43 | "import cobra.test\n", 44 | "m = cobra.test.create_test_model(\"textbook\")" 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": {}, 50 | "source": [ 51 | "The model_to_pymatbridge function will send the model to the workspace with the given variable name." 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 3, 57 | "metadata": { 58 | "collapsed": false 59 | }, 60 | "outputs": [], 61 | "source": [ 62 | "from cobra.io.mat import model_to_pymatbridge\n", 63 | "model_to_pymatbridge(m, variable_name=\"model\")" 64 | ] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "metadata": {}, 69 | "source": [ 70 | "Now in the MATLAB workspace, the variable name 'model' holds a COBRA toolbox struct encoding the model." 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 4, 76 | "metadata": { 77 | "collapsed": false 78 | }, 79 | "outputs": [ 80 | { 81 | "data": { 82 | "text/plain": [ 83 | "\n", 84 | "model = \n", 85 | "\n", 86 | " rev: [95x1 double]\n", 87 | " metNames: {72x1 cell}\n", 88 | " b: [72x1 double]\n", 89 | " metCharge: [72x1 double]\n", 90 | " c: [95x1 double]\n", 91 | " csense: [72x1 char]\n", 92 | " genes: {137x1 cell}\n", 93 | " metFormulas: {72x1 cell}\n", 94 | " rxns: {95x1 cell}\n", 95 | " grRules: {95x1 cell}\n", 96 | " rxnNames: {95x1 cell}\n", 97 | " description: [11x1 char]\n", 98 | " S: [72x95 double]\n", 99 | " ub: [95x1 double]\n", 100 | " lb: [95x1 double]\n", 101 | " mets: {72x1 cell}\n", 102 | " subSystems: {95x1 cell}\n", 103 | "\n" 104 | ] 105 | }, 106 | "metadata": {}, 107 | "output_type": "display_data" 108 | } 109 | ], 110 | "source": [ 111 | "%%matlab\n", 112 | "model" 113 | ] 114 | }, 115 | { 116 | "cell_type": "markdown", 117 | "metadata": {}, 118 | "source": [ 119 | "First, we have to initialize the COBRA toolbox in MATLAB." 
120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": 5, 125 | "metadata": { 126 | "collapsed": true 127 | }, 128 | "outputs": [], 129 | "source": [ 130 | "%%matlab --silent\n", 131 | "warning('off'); % this works around a pymatbridge bug\n", 132 | "addpath(genpath('~/cobratoolbox/'));\n", 133 | "initCobraToolbox();" 134 | ] 135 | }, 136 | { 137 | "cell_type": "markdown", 138 | "metadata": {}, 139 | "source": [ 140 | "Commands from the COBRA toolbox can now be run on the model" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": 6, 146 | "metadata": { 147 | "collapsed": false 148 | }, 149 | "outputs": [ 150 | { 151 | "data": { 152 | "text/plain": [ 153 | "\n", 154 | "ans = \n", 155 | "\n", 156 | " x: [95x1 double]\n", 157 | " f: 0.8739\n", 158 | " y: [71x1 double]\n", 159 | " w: [95x1 double]\n", 160 | " stat: 1\n", 161 | " origStat: 5\n", 162 | " solver: 'glpk'\n", 163 | " time: 3.2911\n", 164 | "\n" 165 | ] 166 | }, 167 | "metadata": {}, 168 | "output_type": "display_data" 169 | } 170 | ], 171 | "source": [ 172 | "%%matlab\n", 173 | "optimizeCbModel(model)" 174 | ] 175 | }, 176 | { 177 | "cell_type": "markdown", 178 | "metadata": {}, 179 | "source": [ 180 | "FBA in the COBRA toolbox should give the same result as cobrapy (but maybe just a little bit slower :))" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": 7, 186 | "metadata": { 187 | "collapsed": false 188 | }, 189 | "outputs": [ 190 | { 191 | "name": "stdout", 192 | "output_type": "stream", 193 | "text": [ 194 | "CPU times: user 0 ns, sys: 0 ns, total: 0 ns\n", 195 | "Wall time: 5.48 µs\n" 196 | ] 197 | }, 198 | { 199 | "data": { 200 | "text/plain": [ 201 | "0.8739215069684909" 202 | ] 203 | }, 204 | "execution_count": 7, 205 | "metadata": {}, 206 | "output_type": "execute_result" 207 | } 208 | ], 209 | "source": [ 210 | "%time\n", 211 | "m.optimize().f" 212 | ] 213 | } 214 | ], 215 | "metadata": { 216 | "kernelspec": { 217 | "display_name": "Python 3", 218 | "language": "python", 219 | "name": "python3" 220 | }, 221 | "language_info": { 222 | "codemirror_mode": { 223 | "name": "ipython", 224 | "version": 3 225 | }, 226 | "file_extension": ".py", 227 | "mimetype": "text/x-python", 228 | "name": "python", 229 | "nbconvert_exporter": "python", 230 | "pygments_lexer": "ipython3", 231 | "version": "3.4.3" 232 | } 233 | }, 234 | "nbformat": 4, 235 | "nbformat_minor": 0 236 | } 237 | -------------------------------------------------------------------------------- /cobra/oven/aliebrahim/designAnalysis.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import pylab 3 | 4 | #from ... 
import solvers 5 | from cobra import solvers 6 | from itertools import combinations 7 | 8 | 9 | def plot_production_envelope(model, target_id, n_points=20, plot=True, 10 | solver_name="glpk"): 11 | """Plot the production envelope for the model given a target 12 | 13 | Parameters 14 | ---------- 15 | model : cobra model 16 | The cobra model should already have the uptake rates se 17 | target_id : str 18 | The id of the exchange reaction for the target compound 19 | n_points : int 20 | The number of points to calculate for the production envolope 21 | plot : bool, optional 22 | Whether or not a plot should be made of the production envelope 23 | 24 | Returns 25 | ------- 26 | growth_rates : :class:`numpy.ndarray` 27 | An array of growth rates 28 | production_rates : :class:`numpy.ndarray` 29 | An array of the corresponding maximum production rate at the 30 | given growth rate. 31 | 32 | """ 33 | solver = solvers.solver_dict[solver_name] 34 | target_id = str(target_id) 35 | target_reaction = model.reactions.get_by_id(target_id) 36 | original_target_bounds = (target_reaction.lower_bound, 37 | target_reaction.upper_bound) 38 | lp = solver.create_problem(model) 39 | if solver.solve_problem(lp) != "optimal": 40 | return ([0], [0]) 41 | solution = solver.format_solution(lp, model) 42 | max_growth_rate = solution.f 43 | max_growth_production = solution.x_dict[target_reaction.id] 44 | #growth_coupled = max_growth_production > 0 45 | # extract the current objective so it can be changed 46 | original_objectives = {} 47 | for reaction in model.reactions: 48 | if reaction.objective_coefficient != 0: 49 | original_objectives[reaction] = reaction.objective_coefficient 50 | reaction.objective_coefficient = 0 51 | # calculate the maximum possible production rate 52 | target_reaction.objective_coefficient = 1 53 | model.optimize(objective_sense="minimize") 54 | min_production_rate = model.solution.f 55 | model.optimize(objective_sense="maximize") 56 | max_production_rate = model.solution.f 57 | production_rates = numpy.linspace(min_production_rate, 58 | max_production_rate, n_points) 59 | # ensure the point of production at maximum growth is included 60 | production_rates[ 61 | numpy.abs(production_rates - max_growth_production).argmin()] = \ 62 | max_growth_production 63 | # if the 0 point was overwritten in the last operation 64 | if production_rates[0] != 0: 65 | production_rates[1] = production_rates[0] 66 | production_rates[0] = 0 67 | growth_rates = production_rates * 0 68 | # make the objective coefficient what it was before 69 | target_reaction.objective_coefficient = 0 70 | for reaction, coefficient in original_objectives.iteritems(): 71 | reaction.objective_coefficient = coefficient 72 | # calculate the maximum growth rate at each production rate 73 | for i in range(n_points): 74 | target_reaction.lower_bound = production_rates[i] 75 | target_reaction.upper_bound = production_rates[i] 76 | solver.update_problem(lp, model) 77 | if solver.solve_problem(lp) == "optimal": 78 | growth_rates[i] = solver.get_objective_value(lp) 79 | else: 80 | growth_rates[i] = 0 81 | # reset the bounds on the target reaction 82 | target_reaction.lower_bound = original_target_bounds[0] 83 | target_reaction.upper_bound = original_target_bounds[1] 84 | if plot: 85 | pylab.plot(growth_rates, production_rates) 86 | pylab.title("Production envelope for %s" % (target_id)) 87 | pylab.xlabel("Growth rate") 88 | pylab.ylabel("Production rate") 89 | pylab.xlim(xmin=0) 90 | pylab.ylim(ymin=0) 91 | return (growth_rates, 
production_rates) 92 | 93 | 94 | def analyze_growth_coupled_num_knockouts(model, knockout_reaction, target_name="EX_etoh_e"): 95 | None 96 | 97 | 98 | def analyze_growth_coupled_design_subset(model, knockout_reactions, knockout_count, target_name="EX_etoh_e"): 99 | lp = model.optimize() 100 | best_score = 0 101 | best = [] 102 | lb = [None] * knockout_count # store lower bounds when reactions are knocked out 103 | ub = [None] * knockout_count # store upper bounds when reactions are knocked out 104 | for subset in combinations(knockout_reactions, knockout_count): 105 | # knockout reactions 106 | for i, reaction_name in enumerate(subset): 107 | reaction = model.reactions.get_by_id(str(reaction_name)) 108 | (lb[i], ub[i]) = (reaction.lower_bound, reaction.upper_bound) 109 | (reaction.lower_bound, reaction.upper_bound) = (0.0, 0.0) 110 | model.optimize() 111 | production = model.solution.x_dict[target_name] 112 | # identical performance 113 | if abs(production - best_score) < 0.001: 114 | best.append(subset) 115 | # better performance 116 | elif production > best_score: 117 | best_score = model.solution.x_dict[target_name] 118 | best = [subset] 119 | print model.solution.f, model.solution.x_dict[target_name] 120 | # reset reactions 121 | for i, reaction_name in enumerate(subset): 122 | reaction = model.reactions.get_by_id(str(reaction_name)); (reaction.lower_bound, reaction.upper_bound) = (lb[i], ub[i]) 123 | return best_score, best 124 | 125 | if __name__ == "__main__": 126 | from cobra.test import ecoli_pickle, create_test_model 127 | from time import time 128 | 129 | model = create_test_model(ecoli_pickle) 130 | #from IPython import embed; embed() 131 | model.reactions.get_by_id("EX_o2_e").lower_bound = 0 132 | #analyze_strain_design(model, ["ABTA", "ACALD", "ACKr", "ATPS4rpp", "F6PA", 133 | # "GLUDy", "LDH_D", "MGSA", "PFL", "TPI"]) 134 | 135 | for i in ["ABTA", "ACALD", "ACKr", "ATPS4rpp", "F6PA", 136 | "GLUDy", "LDH_D", "MGSA", "PFL", "TPI"]: 137 | model.reactions.get_by_id(i).lower_bound = 0 138 | model.reactions.get_by_id(i).upper_bound = 0 139 | start = time() 140 | plot_production_envelope(model, "EX_etoh_e", solver_name="glpk", n_points=40, plot=True) 141 | print "ran in %.2f seconds" % (time() - start) 142 | pylab.show() 143 | # calculates in approx 1 second on 3.4 GHz i7 144 | -------------------------------------------------------------------------------- /cobra/flux_analysis/deletion_worker.py: -------------------------------------------------------------------------------- 1 | from multiprocessing import Queue, Process, cpu_count 2 | 3 | from ..solvers import get_solver_name, solver_dict 4 | from six import iteritems 5 | 6 | 7 | def compute_fba_deletion_worker(cobra_model, solver, job_queue, output_queue, 8 | **kwargs): 9 | solver = solver_dict[get_solver_name() if solver is None else solver] 10 | lp = solver.create_problem(cobra_model) 11 | solver_args = kwargs 12 | solver.solve_problem(lp) 13 | while True: 14 | indexes, label = job_queue.get() 15 | label = indexes if label is None else label 16 | result = compute_fba_deletion(lp, solver, cobra_model, indexes, 17 | **solver_args) 18 | output_queue.put((label, result)) 19 | 20 | 21 | def compute_fba_deletion(lp, solver_object, model, indexes, **kwargs): 22 | s = solver_object 23 | old_bounds = {} 24 | for i in indexes: 25 | reaction = model.reactions[i] 26 | old_bounds[i] = (reaction.lower_bound, reaction.upper_bound) 27 | s.change_variable_bounds(lp, i, 0., 0.)
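    # the reactions selected for deletion are now fixed to zero flux; the LP is re-solved below and the saved bounds are restored afterwards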
28 | try: 29 | s.solve_problem(lp, **kwargs) 30 | except Exception as e: 31 | return RuntimeError("solver failure when deleting %s: %s" % 32 | (str(indexes), repr(e))) 33 | status = s.get_status(lp) 34 | objective = s.get_objective_value(lp) if status == "optimal" else 0. 35 | 36 | # reset the problem, which must be done after reading the solution 37 | for index, bounds in iteritems(old_bounds): 38 | s.change_variable_bounds(lp, index, bounds[0], bounds[1]) 39 | 40 | if status == "infeasible" or status == "optimal": 41 | return objective 42 | else: 43 | return RuntimeError("solver failure (status %s) for when deleting %s" % 44 | (status, str(indexes))) 45 | 46 | 47 | class CobraDeletionPool(object): 48 | """A pool of workers for solving deletions 49 | 50 | submit jobs to the pool using submit and recieve results using receive_all 51 | """ 52 | # Having an existing basis makes solving an existing LP much faster. The 53 | # most efficient approach is to have a worker function which modifies an LP 54 | # object and reverts it back after each calculation. Each lp object stores 55 | # the basis so subsequent LP's are solved more quickely, and memory does 56 | # not need to be re-allocated each time to create a new problem. Because 57 | # state is being saved, the workers in the deletion pool are careful about 58 | # reverting the object after simulating a deletion, and are written to be 59 | # flexible enough so they can be used in most applications instead of 60 | # writing a custom worker each time. 61 | 62 | def __init__(self, cobra_model, n_processes=None, solver=None, **kwargs): 63 | if n_processes is None: 64 | n_processes = min(cpu_count(), 4) 65 | # start queues 66 | self.job_queue = Queue() # format is (indexes, job_label) 67 | self.n_submitted = 0 68 | self.n_complete = 0 69 | self.output_queue = Queue() # format is (job_label, growth_rate) 70 | # start processes 71 | self.processes = [] 72 | for i in range(n_processes): 73 | p = Process(target=compute_fba_deletion_worker, 74 | args=[cobra_model, solver, 75 | self.job_queue, self.output_queue], 76 | kwargs=kwargs) 77 | self.processes.append(p) 78 | 79 | def start(self): 80 | for p in self.processes: 81 | p.start() 82 | 83 | def terminate(self): 84 | for p in self.processes: 85 | p.terminate() 86 | 87 | def __enter__(self): 88 | self.start() 89 | return self 90 | 91 | def __exit__(self, exc_type, exc_val, exc_tb): 92 | try: 93 | self.terminate() 94 | except: 95 | pass 96 | 97 | def submit(self, indexes, label=None): 98 | self.job_queue.put((indexes, label)) 99 | self.n_submitted += 1 100 | 101 | def receive_one(self): 102 | """This function blocks""" 103 | self.n_complete += 1 104 | result = self.output_queue.get() 105 | if isinstance(result[1], Exception): 106 | raise result[1] 107 | return result 108 | 109 | def receive_all(self): 110 | while self.n_complete < self.n_submitted: 111 | self.n_complete += 1 112 | result = self.output_queue.get() 113 | if isinstance(result[1], Exception): 114 | raise result[1] 115 | yield result 116 | 117 | @property 118 | def pids(self): 119 | return [p.pid for p in self.processes] 120 | 121 | def __del__(self): 122 | for process in self.processes: 123 | process.terminate() 124 | process.join() 125 | 126 | 127 | class CobraDeletionMockPool(object): 128 | """Mock pool solves LP's in the same process""" 129 | 130 | def __init__(self, cobra_model, n_processes=1, solver=None, **kwargs): 131 | if n_processes != 1: 132 | from warnings import warn 133 | warn("Mock Pool does not do multiprocessing") 134 | 
self.job_queue = [] 135 | self.solver_args = kwargs 136 | solver_name = get_solver_name() if solver is None else solver 137 | self.solver = solver_dict[solver_name] 138 | self.lp = self.solver.create_problem(cobra_model) 139 | self.solver.solve_problem(self.lp) 140 | self.model = cobra_model 141 | 142 | def submit(self, indexes, label=None): 143 | self.job_queue.append((indexes, label)) 144 | 145 | def receive_one(self): 146 | indexes, label = self.job_queue.pop() 147 | result = compute_fba_deletion(self.lp, self.solver, self.model, 148 | indexes, **self.solver_args) 149 | if isinstance(result, Exception): 150 | raise result 151 | return (label, result) 152 | 153 | def receive_all(self): 154 | for i in range(len(self.job_queue)): 155 | indexes, label = self.job_queue.pop() 156 | result = compute_fba_deletion(self.lp, self.solver, self.model, 157 | indexes, **self.solver_args) 158 | if isinstance(result, Exception): 159 | raise result 160 | yield (label, result) 161 | 162 | def start(self): 163 | None 164 | 165 | def terminate(self): 166 | None 167 | 168 | def __enter__(self): 169 | return self 170 | 171 | def __exit__(self, exc_type, exc_val, exc_tb): 172 | None 173 | -------------------------------------------------------------------------------- /cobra/solvers/glpk.pxd: -------------------------------------------------------------------------------- 1 | #inspired by sage/src/sage/numerical/backends/glpk_backend.pxd 2 | 3 | cdef extern from "glpk.h": 4 | ctypedef struct glp_prob "glp_prob": 5 | pass 6 | ctypedef struct glp_iocp "glp_iocp": 7 | int msg_lev 8 | int br_tech 9 | int bt_tech 10 | int pp_tech 11 | int fp_heur 12 | int gmi_cuts 13 | int mir_cuts 14 | int cov_cuts 15 | int clq_cuts 16 | double tol_int 17 | double tol_obj 18 | double mip_gap 19 | int tm_lim 20 | int out_frq 21 | int out_dly 22 | int presolve 23 | int binarize 24 | ctypedef struct glp_smcp "glp_smcp": 25 | int msg_lev 26 | int meth 27 | int pricing 28 | int r_test 29 | double tol_bnd 30 | double tol_dj 31 | double tol_piv 32 | double obj_ll 33 | double obj_ul 34 | int it_lim 35 | int tm_lim 36 | int out_frq 37 | int out_dly 38 | int presolve 39 | glp_iocp * new_glp_iocp "new glp_iocp" () 40 | void glp_init_iocp(glp_iocp *) 41 | void glp_init_smcp(glp_smcp *) 42 | glp_prob * glp_create_prob() 43 | void glp_set_prob_name(glp_prob *, char *) 44 | void glp_set_obj_dir(glp_prob *, int) 45 | void glp_add_rows(glp_prob *, int) 46 | void glp_add_cols(glp_prob *, int) 47 | void glp_del_rows(glp_prob *, int, int *) 48 | void glp_set_row_name(glp_prob *, int, char *) 49 | void glp_set_col_name(glp_prob *, int, char *) 50 | void glp_set_row_bnds(glp_prob *, int, int, double, double) 51 | void glp_set_col_bnds(glp_prob *, int, int, double, double) 52 | void glp_set_obj_coef(glp_prob *, int, double) 53 | void glp_load_matrix(glp_prob *, int, int *, int *, double *) 54 | int glp_simplex(glp_prob *, glp_smcp *) 55 | int glp_exact(glp_prob *, glp_smcp *) # requires gmp 56 | int glp_intopt(glp_prob *, glp_iocp *) 57 | void glp_std_basis(glp_prob *) 58 | void glp_delete_prob(glp_prob *) 59 | double glp_get_col_prim(glp_prob *, int) 60 | double glp_get_obj_val(glp_prob *) 61 | double glp_get_col_dual(glp_prob *, int) 62 | double glp_get_row_dual(glp_prob *, int) 63 | int glp_print_ranges(glp_prob *lp, int,int, int, char *fname) 64 | int glp_get_num_rows(glp_prob *) 65 | int glp_get_num_cols(glp_prob *) 66 | int glp_get_num_int(glp_prob *) 67 | double glp_mip_col_val(glp_prob *, int) 68 | double glp_mip_obj_val(glp_prob *) 69 | void 
glp_set_col_kind(glp_prob *, int, int) 70 | int glp_write_mps(glp_prob *lp, int fmt, void *parm, char *fname) 71 | int glp_write_lp(glp_prob *lp, void *parm, char *fname) 72 | int glp_write_prob(glp_prob *P, int flags, char *fname) 73 | int glp_read_prob(glp_prob *P, int flags, char *fname) 74 | 75 | void glp_set_prob_name(glp_prob *lp, char *name) 76 | void glp_set_obj_name(glp_prob *lp, char *name) 77 | void glp_set_row_name(glp_prob *lp, int i, char *name) 78 | void glp_set_col_name(glp_prob *lp, int i, char *name) 79 | 80 | double glp_get_row_ub(glp_prob *lp, int i) 81 | double glp_get_row_lb(glp_prob *lp, int i) 82 | 83 | double glp_get_col_ub(glp_prob *lp, int i) 84 | double glp_get_col_lb(glp_prob *lp, int i) 85 | void glp_set_col_ub(glp_prob *lp, int i, double value) 86 | void glp_set_col_lb(glp_prob *lp, int i, double value) 87 | 88 | 89 | void glp_create_index(glp_prob *P) 90 | int glp_find_row(glp_prob *P, const char *name) 91 | int glp_find_col(glp_prob *P, const char *name) 92 | void glp_delete_index(glp_prob *P) 93 | 94 | double glp_get_col_lb(glp_prob *lp, int i) 95 | double glp_get_col_ub(glp_prob *lp, int i) 96 | 97 | void glp_scale_prob(glp_prob *lp, int flags) 98 | void glp_unscale_prob(glp_prob *lp) 99 | 100 | int glp_get_prim_stat(glp_prob *lp) 101 | int glp_get_status(glp_prob *lp) 102 | int glp_mip_status(glp_prob *lp) 103 | int glp_get_num_nz(glp_prob *lp) 104 | int glp_set_mat_row(glp_prob *lp, int, int, int *, double * ) 105 | int glp_set_mat_col(glp_prob *lp, int, int, int *, double * ) 106 | int glp_get_mat_row(glp_prob *lp, int, int *, double * ) 107 | int glp_get_mat_col(glp_prob *lp, int, int *, double * ) 108 | double glp_get_row_ub(glp_prob *lp, int) 109 | double glp_get_row_lb(glp_prob *lp, int) 110 | int glp_get_col_kind(glp_prob *lp, int) 111 | double glp_get_obj_coef(glp_prob *lp, int) 112 | int glp_get_obj_dir(glp_prob *lp) 113 | void glp_copy_prob(glp_prob *dst, glp_prob *src, int names) 114 | 115 | const char *glp_version() 116 | 117 | # output redirection 118 | int glp_term_out(int flag) 119 | void glp_term_hook(int (*func)(void *info, const char *s), void *info) 120 | 121 | int glp_warm_up(glp_prob *P) 122 | void glp_adv_basis(glp_prob *P, int flags) 123 | 124 | # constants 125 | 126 | # constants for smcp control 127 | 128 | int GLP_MSG_OFF 129 | int GLP_MSG_ERR 130 | int GLP_MSG_ON 131 | int GLP_MSG_ALL 132 | 133 | int GLP_PRIMAL 134 | int GLP_DUALP 135 | int GLP_DUAL 136 | 137 | int GLP_PT_STD 138 | int GLP_PT_PSE 139 | 140 | int GLP_RT_STD 141 | int GLP_RT_HAR 142 | 143 | double DBL_MAX 144 | 145 | int INT_MAX 146 | 147 | int GLP_ON 148 | int GLP_OFF 149 | 150 | # constants for scaling the problem 151 | int GLP_SF_AUTO 152 | int GLP_SF_GM 153 | int GLP_SF_EQ 154 | int GLP_SF_2N 155 | int GLP_SF_SKIP 156 | 157 | # constants for iocp control, not already in simplex 158 | 159 | int GLP_BR_FFV 160 | int GLP_BR_LFV 161 | int GLP_BR_MFV 162 | int GLP_BR_DTH 163 | int GLP_BR_PCH 164 | 165 | int GLP_BT_DFS 166 | int GLP_BT_BFS 167 | int GLP_BT_BLB 168 | int GLP_BT_BPH 169 | 170 | int GLP_PP_NONE 171 | int GLP_PP_ROOT 172 | int GLP_PP_ALL 173 | 174 | # error codes 175 | int GLP_EBADB 176 | int GLP_ESING 177 | int GLP_ECOND 178 | int GLP_EBOUND 179 | int GLP_EFAIL 180 | int GLP_EOBJLL 181 | int GLP_EOBJUL 182 | int GLP_EITLIM 183 | int GLP_ETMLIM 184 | int GLP_ENOPFS 185 | int GLP_ENODFS 186 | int GLP_EROOT 187 | int GLP_ESTOP 188 | int GLP_EMIPGAP 189 | int GLP_ENOFEAS 190 | int GLP_ENOCVG 191 | int GLP_EINSTAB 192 | int GLP_EDATA 193 | int GLP_ERANGE 
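    # solution status codes (returned by glp_get_status / glp_mip_status)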
194 | 195 | 196 | int GLP_UNDEF 197 | int GLP_OPT 198 | int GLP_FEAS 199 | int GLP_NOFEAS 200 | int GLP_INFEAS 201 | int GLP_UNBND 202 | 203 | # other constants 204 | 205 | int GLP_MAX 206 | int GLP_MIN 207 | int GLP_UP 208 | int GLP_FR 209 | int GLP_DB 210 | int GLP_FX 211 | int GLP_LO 212 | int GLP_CV 213 | int GLP_IV 214 | int GLP_BV 215 | int GLP_MPS_DECK 216 | int GLP_MPS_FILE 217 | 218 | int GLP_MSG_DBG 219 | -------------------------------------------------------------------------------- /cobra/oven/aliebrahim/keggIO.py: -------------------------------------------------------------------------------- 1 | import csv 2 | import re 3 | import copy 4 | from os.path import join, abspath, split 5 | 6 | import cobra 7 | 8 | # the default file locations 9 | kegg_directory = join(split(abspath(__file__))[0], "kegg_files") 10 | keggdictpath_default = join(kegg_directory, "kegg_dict.csv") 11 | reactionlst_default = join(kegg_directory, "reaction.lst") 12 | blacklistpath_default = join(kegg_directory, "kegg_blacklist.csv") 13 | 14 | 15 | def _intify(string): 16 | """returns integer representation of the str 17 | If str is a single letter, it will return 1""" 18 | if string.isdigit(): 19 | return int(string) 20 | # if the expression contains n, the default value is 2 21 | n = 2 22 | if string == "2n": 23 | return 2 * n 24 | try: 25 | return eval(string) 26 | except: 27 | raise ValueError(string) 28 | 29 | 30 | def _parse_split_array(str_array): 31 | """takes in an array of strings, each of which is either 32 | - a compound OR 33 | - a number followed by a compound 34 | returns [array_of_metabolites, corresponding_coefficient]""" 35 | metabolites = [] 36 | coefficients = [] 37 | for string in str_array: 38 | string = string.strip() 39 | if string[0].isupper(): # starts with an uppercase letter 40 | # there is no number associated, so it should be 1 41 | metabolites.append(string) 42 | coefficients.append(1) 43 | else: 44 | the_coefficient, the_metabolite = string.split() 45 | metabolites.append(the_metabolite) 46 | coefficients.append(_intify(the_coefficient)) 47 | return [metabolites, coefficients] 48 | 49 | 50 | def import_kegg_reactions(compartment="c", reactionlstpath=None, 51 | keggdictpath=None, blacklistpath=None): 52 | """reads in kegg reactions from the three given files 53 | compartment: the compartment to which each reaction will be added 54 | 55 | If no file is specified for any of these, a default file will be used: 56 | reactionlstpath: path to path of kegg reactions 57 | the format should be 58 | reactionid: Met1 + 2 Met2 <=> Met3 + 2 Met4 59 | 60 | keggdictpath: The path to a csv file translating between kegg and cobra 61 | metabolite ID's, where the first column contains the kegg ID, and the 62 | second contains cobra id 63 | 64 | blacklistpath: path to a file listing the blacklisted reactions, with 65 | one per line 66 | 67 | returns: cobra model with all of the included reactions 68 | """ 69 | 70 | if reactionlstpath is None: 71 | reactionlstpath = reactionlst_default 72 | if keggdictpath is None: 73 | keggdictpath = keggdictpath_default 74 | if blacklistpath is None: 75 | blacklistpath = blacklistpath_default 76 | 77 | # read in kegg dictionary to translate between kegg and cobra id's 78 | keggdictfile = open(keggdictpath, "r") 79 | keggdictcsv = csv.reader(keggdictfile) 80 | keggdict = {} 81 | for line in keggdictcsv: 82 | keggdict[line[1]] = line[0] 83 | keggdictfile.close() 84 | # read in the kegg blacklist 85 | keggblacklistfile = open(blacklistpath, "r") 86 | 
keggblacklistcsv = csv.reader(keggblacklistfile) 87 | keggblacklist = [] 88 | for line in keggblacklistcsv: 89 | keggblacklist.append(line[0]) 90 | keggblacklistfile.close() 91 | 92 | # parse the file of kegg reactions 93 | keggfile = open(reactionlstpath, "r") 94 | # regular expressions to split strings 95 | colon_sep = re.compile(":").split 96 | arrow_sep = re.compile("<=>").split 97 | plus_sep = re.compile(" \+ ").split 98 | keggreactions = [] 99 | cobra_reactions = [] 100 | used_metabolites = {} 101 | for line in keggfile: 102 | [id, reactionstr] = colon_sep(line, maxsplit=1) 103 | # remove whitespace 104 | id = id.strip() 105 | # if the id is in the blacklist, no need to proceed 106 | if id in keggblacklist: 107 | continue 108 | # split into reactants and products 109 | reactants_str, products_str = arrow_sep(reactionstr, maxsplit=1) 110 | # break up reactant and product strings into arrays of 111 | # metabolites and coefficients 112 | reactant_metabolites, reactant_coefficients = \ 113 | _parse_split_array(plus_sep(reactants_str)) 114 | product_metabolites, product_coefficients = \ 115 | _parse_split_array(plus_sep(products_str)) 116 | # reactant coefficients all need to be multiplied by -1 117 | for i, coeff in enumerate(reactant_coefficients): 118 | reactant_coefficients[i] = coeff * -1 119 | # make one array for all compoenents 120 | kegg_metabolites = reactant_metabolites 121 | coefficients = reactant_coefficients 122 | kegg_metabolites.extend(product_metabolites) 123 | coefficients.extend(product_coefficients) 124 | # translate the metabolites from kegg to cobra 125 | metabolites = [] 126 | try: 127 | for the_kegg_metabolite in kegg_metabolites: 128 | metabolites.append(keggdict[the_kegg_metabolite]) 129 | # if one of the metabolites is not found, skip to the next line 130 | except KeyError: 131 | continue 132 | 133 | # make a Kegg reaction 134 | reaction = cobra.Reaction(id) 135 | metabolite_dict = {} # dict of {metabolite : coefficient} 136 | for i, the_metabolite in enumerate(metabolites): 137 | metabolite_id = the_metabolite + "_" + compartment 138 | # if the metabolite already exists 139 | if metabolite_id in used_metabolites: 140 | used_metabolites[metabolite_id] = coefficients[i] 141 | else: 142 | # use a new metabolite 143 | new_metabolite = cobra.Metabolite(metabolite_id) 144 | used_metabolites[metabolite_id] = new_metabolite 145 | metabolite_dict[cobra.Metabolite(metabolite_id)] = \ 146 | coefficients[i] 147 | reaction.add_metabolites(metabolite_dict) 148 | reaction.notes["temporary_gapfilling_type"] = "Universal" 149 | # because the model will be converted to irreversible 150 | reaction.lower_bound = -1 * reaction.upper_bound 151 | cobra_reactions.append(reaction) 152 | keggfile.close() 153 | # add all of the reactions to a cobra model 154 | Universal = cobra.Model("Kegg_Universal_Reactions") 155 | Universal.add_reactions(cobra_reactions) 156 | return Universal 157 | if __name__ == "__main__": 158 | from time import time 159 | start_time = time() 160 | test_import = import_kegg_reactions() 161 | duration = time() - start_time 162 | print "imported %d reactions in %.2f sec" % \ 163 | (len(test_import.reactions), duration) 164 | -------------------------------------------------------------------------------- /cobra/solvers/esolver.py: -------------------------------------------------------------------------------- 1 | from subprocess import check_output, check_call, CalledProcessError 2 | from os import unlink, devnull 3 | from os.path import isfile 4 | from tempfile 
import NamedTemporaryFile 5 | from fractions import Fraction 6 | from six.moves import zip 7 | 8 | from . import cglpk 9 | from .wrappers import * 10 | 11 | # detect paths to system calls for esolver and gzip 12 | with open(devnull, "w") as DEVNULL: 13 | try: 14 | ESOLVER_COMMAND = check_output(["which", "esolver"], 15 | stderr=DEVNULL).strip() 16 | __esolver_version__ = check_output(["esolver", "-v"], stderr=DEVNULL) 17 | except CalledProcessError: 18 | raise RuntimeError("esolver command not found") 19 | try: 20 | GZIP_COMMAND = check_output(["which", "gzip"], stderr=DEVNULL).strip() 21 | except CalledProcessError: 22 | raise RuntimeError("gzip command not found") 23 | del DEVNULL 24 | 25 | solver_name = "esolver" 26 | 27 | 28 | class Esolver(cglpk.GLP): 29 | """contain an LP which will be solved through the QSopt_ex 30 | 31 | The LP is stored using a GLPK object, and written out to an 32 | LP file which is then solved by the esolver command.""" 33 | 34 | def __init__(self, cobra_model=None): 35 | cglpk.GLP.__init__(self, cobra_model) 36 | self.solution_filepath = None 37 | self.basis_filepath = None 38 | self.rational_solution = False 39 | self.verbose = False 40 | self.clean_up = True # clean up files 41 | 42 | def _clean(self, filename): 43 | """remove old files""" 44 | if self.clean_up and filename is not None and isfile(filename): 45 | unlink(filename) 46 | 47 | def set_parameter(self, parameter_name, value): 48 | if parameter_name == "GLP": 49 | raise Exception("can not be set this way") 50 | if parameter_name == "objective_sense": 51 | self.set_objective_sense(value) 52 | if not hasattr(self, parameter_name): 53 | raise ValueError("Unkonwn parameter '%s'" % parameter_name) 54 | setattr(self, parameter_name, value) 55 | 56 | def solve_problem(self, **solver_parameters): 57 | if "objective_sense" in solver_parameters: 58 | self.set_objective_sense(solver_parameters.pop("objective_sense")) 59 | for key, value in solver_parameters.items(): 60 | self.set_parameter(key, value) 61 | # remove the old solution file 62 | self._clean(self.solution_filepath) 63 | with NamedTemporaryFile(suffix=".lp", delete=False) as f: 64 | lp_filepath = f.name 65 | self.write(lp_filepath) 66 | existing_basis = self.basis_filepath 67 | with NamedTemporaryFile(suffix=".bas", delete=False) as f: 68 | self.basis_filepath = f.name 69 | with NamedTemporaryFile(suffix=".sol") as f: 70 | self.solution_filepath = f.name 71 | command = [ESOLVER_COMMAND, "-b", self.basis_filepath, 72 | "-O", self.solution_filepath[:-4]] 73 | if existing_basis is not None and isfile(existing_basis): 74 | command.extend(["-B", existing_basis]) 75 | command.extend(["-L", lp_filepath]) 76 | command_kwargs = {} 77 | if self.verbose: 78 | print(" ".join(command)) 79 | DEVNULL = None 80 | else: 81 | DEVNULL = open(devnull, 'wb') 82 | command_kwargs["stdout"] = DEVNULL 83 | command_kwargs["stderr"] = DEVNULL 84 | try: 85 | check_call(command, **command_kwargs) 86 | failed = False 87 | except CalledProcessError as e: 88 | failed = True 89 | if failed: 90 | self.basis_filepath = existing_basis 91 | existing_basis = None 92 | # Sometimes on failure a solution isn't written out 93 | if not isfile(self.solution_filepath): 94 | with open(self.solution_filepath, "w") as outfile: 95 | outfile.write("=infeasible\n") 96 | elif isfile(self.solution_filepath + ".gz"): 97 | # the solution may be written out compressed 98 | check_call([GZIP_COMMAND, "-d", self.solution_filepath + ".gz"]) 99 | if DEVNULL is not None: 100 | DEVNULL.close() 101 | 
self._clean(lp_filepath) 102 | self._clean(existing_basis) # replaced with the new basis 103 | 104 | def get_status(self): 105 | with open(self.solution_filepath) as infile: 106 | return infile.readline().split("=")[1].strip().lower() 107 | 108 | def _format(self, value): 109 | """convert a string value into either a fraction or float""" 110 | value = Fraction(value) 111 | return value if self.rational_solution else float(value) 112 | 113 | def get_objective_value(self): 114 | with open(self.solution_filepath) as infile: 115 | status = infile.readline().split("=")[1].strip().lower() 116 | if status != "optimal": 117 | raise RuntimeError("status not optimal") 118 | infile.readline() 119 | return self._format(infile.readline().split("=")[1].strip()) 120 | 121 | def format_solution(self, cobra_model): 122 | m = cobra_model 123 | solution = m.solution.__class__(None) 124 | with open(self.solution_filepath) as infile: 125 | solution.status = infile.readline().split("=")[1].strip().lower() 126 | if solution.status != "optimal": 127 | return solution 128 | infile.readline() 129 | solution.f = self._format(Fraction(infile.readline() 130 | .split("=")[1].strip())) 131 | infile.readline() 132 | value_dict = {} 133 | for line in infile: 134 | if line.endswith(":\n"): 135 | break 136 | varname, value = line.split("=") 137 | value_dict[varname.strip()] = self._format(value.strip()) 138 | dual_dict = {} 139 | for line in infile: 140 | if line.endswith(":\n"): 141 | break 142 | varname, value = line.split("=") 143 | dual_dict[varname.strip()] = self._format(value.strip()) 144 | solution.x = [value_dict.get("x_%d" % (i + 1), 0) 145 | for i in range(len(m.reactions))] 146 | solution.x_dict = {r.id: v for r, v in zip(m.reactions, solution.x)} 147 | solution.y = [dual_dict.get("r_%d" % (i + 1), 0) 148 | for i in range(len(m.metabolites))] 149 | solution.y_dict = {m.id: v for m, v in zip(m.metabolites, solution.y)} 150 | return solution 151 | 152 | 153 | # wrappers for the classmethods at the module level 154 | create_problem = Esolver.create_problem 155 | solve = Esolver.solve 156 | -------------------------------------------------------------------------------- /cobra/topology/reporter_metabolites.py: -------------------------------------------------------------------------------- 1 | # Based on Patil et al 2005 PNAS 102:2685-9 2 | # TODO: Validate cobra.core compliance 3 | from __future__ import print_function 4 | from numpy import array, mean, std, where 5 | from scipy.stats import norm, randint 6 | from six import iteritems 7 | 8 | 9 | def identify_reporter_metabolites(cobra_model, reaction_scores_dict, 10 | number_of_randomizations=1000, 11 | scoring_metric='default', score_type='p', 12 | entire_network=False, 13 | background_correction=True, 14 | ignore_external_boundary_reactions=False): 15 | """Calculate the aggregate Z-score for the metabolites in the model. 16 | Ignore reactions that are solely spontaneous or orphan. Allow the scores to 17 | have multiple columns / experiments. This will change the way the output 18 | is represented. 19 | 20 | cobra_model: A cobra.Model object 21 | 22 | TODO: CHANGE TO USING DICTIONARIES for the_reactions: the_scores 23 | 24 | reaction_scores_dict: A dictionary where the keys are reactions in 25 | cobra_model.reactions and the values are the scores. Currently, only 26 | supports a single numeric value as the value; however, this will be updated 27 | to allow for lists 28 | 29 | number_of_randomizations: Integer. 
Number of random shuffles of the 30 | scores to assess which are significant. 31 | 32 | scoring_metric: default means divide by k**0.5 33 | 34 | score_type: 'p' Is the only option at the moment and indicates p-value. 35 | 36 | entire_network: Boolean. Currently, only compares scores calculated from 37 | the_reactions 38 | 39 | background_correction: Boolean. If True apply background correction to the 40 | aggreagate Z-score 41 | 42 | ignore_external_boundary_reactions: Not yet implemented. Boolean. If True 43 | do not count exchange reactions when calculating the score. 44 | """ 45 | 46 | # Add in a function to calculate based on correlation coefficients and to 47 | # deal with other multidimensional data. 48 | the_reactions = reaction_scores_dict.keys() 49 | the_scores = reaction_scores_dict.values() 50 | if score_type == 'p' and not hasattr(the_scores[0], '__iter__'): 51 | # minimum and maximum p-values are used to prevent numerical problems. 52 | # haven't decided whether an arbitrary min / max 1e-15 is preferred to 53 | # blunting the ends based on the values closest to 0 or 1. 54 | the_reactions = reaction_scores_dict.keys() 55 | the_scores = array(reaction_scores_dict.values()) 56 | minimum_p = min(the_scores[the_scores.nonzero()[0]]) 57 | maximum_p = max(the_scores[where(the_scores < 1)[0]]) 58 | the_scores[where(the_scores < minimum_p)] = minimum_p 59 | the_scores[where(the_scores > maximum_p)] = maximum_p 60 | the_scores = -norm.ppf(the_scores) 61 | # update the dictionary with the new scores 62 | reaction_scores_dict = dict(zip(the_reactions, the_scores)) 63 | elif hasattr(the_scores[0], '__iter__'): 64 | # In the case that the_scores is a list of lists, assume that each list 65 | # is the score for each reaction in the_reactions across all reactions. 66 | # Then for each metabolite, calculate the invnorm(|Pearson Correlation 67 | # Coefficient| for each reaction pair that it links. 68 | raise Exception("This isn't implemented yet") 69 | 70 | # Get the connectivity for each metabolite 71 | the_metabolites = set() 72 | for x in reaction_scores_dict: 73 | the_metabolites.update(x._metabolites) 74 | 75 | metabolite_scores = {} 76 | metabolite_connections = {} 77 | # Calculate the score for each metabolite 78 | for the_metabolite in the_metabolites: 79 | nonspontaneous_connections = [x for x in the_metabolite._reaction 80 | if x.gene_reaction_rule.lower() not in 81 | ['s0001', '']] 82 | tmp_score = 0 83 | number_of_connections = len(nonspontaneous_connections) 84 | for the_reaction in nonspontaneous_connections: 85 | if the_reaction not in reaction_scores_dict: 86 | if not entire_network: 87 | number_of_connections -= 1 88 | continue 89 | else: 90 | tmp_score += reaction_scores_dict[the_reaction] 91 | metabolite_scores[the_metabolite] = tmp_score 92 | metabolite_connections[the_metabolite] = number_of_connections 93 | 94 | # NOTE: Doing the corrections based only on the significantly perturbed 95 | # scores is probably going to underestimate the significance. 96 | if background_correction: 97 | correction_dict = {} 98 | for i in set(metabolite_connections.values()): 99 | # if entire_network # add in a section to deal with the situation 100 | # where the entire network structure is considered by only have 101 | # p-values for a limited subset. 
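            # The sampled sums are scaled by i**0.5, and their mean and sample
            # standard deviation give the null distribution against which each
            # metabolite's aggregate score (the sum of its reaction scores
            # divided by number_of_connections**0.5) is z-normalized further down.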
102 | # 103 | # Basically, what we're doing here is that for each i we select i 104 | # scores number_of_randomizations times 105 | the_random_indices = randint.rvs( 106 | 0, len(the_scores), size=(number_of_randomizations, i)) 107 | random_score_distribution = array( 108 | [sum(the_scores[x]) 109 | for x in list(the_random_indices)]) / i**0.5 110 | correction_dict[i] = [mean(random_score_distribution), 111 | std(random_score_distribution, ddof=1)] 112 | 113 | for the_metabolite, the_score in iteritems(metabolite_scores): 114 | number_of_connections = metabolite_connections[the_metabolite] 115 | if number_of_connections > 0: 116 | # Correct based on background distribution 117 | if background_correction: 118 | # if the list of scores is only for significant perturbations 119 | # then the background correction shouldn't be applied because 120 | # the current sampling method only takes into account 121 | # the_scores not the entire network. It'd be more accurate to 122 | # assign unscored reactions a default score. 123 | the_score = ((the_score / number_of_connections**.5) - 124 | correction_dict[number_of_connections][0]) / \ 125 | correction_dict[number_of_connections][1] 126 | else: 127 | the_score = the_score / number_of_connections**.5 128 | # Update the score 129 | metabolite_scores[the_metabolite] = the_score 130 | 131 | return_dictionary = {'scores': metabolite_scores, 132 | 'connections': metabolite_connections} 133 | if background_correction: 134 | return_dictionary['corrections'] = correction_dict 135 | 136 | return return_dictionary 137 | -------------------------------------------------------------------------------- /cobra/oven/danielhyduke/jython/numpy/core/Matrix.py: -------------------------------------------------------------------------------- 1 | #cobra.numjy.Matrix.py 2 | #Basic matrix class that is going to be used to mimic numpy.ndarray 3 | #capabilities. 
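#The data is held in a cern.colt DenseDoubleMatrix2D (SparseDoubleMatrix2D when
#a sparse factory is requested) and multiplication / left division are
#delegated to cern.colt.matrix.linalg.Algebra.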
4 | # 5 | #Derived from Simon Galbraith's ncajava Matrix.py file 6 | import java, javax, jarray 7 | from copy import deepcopy 8 | from cern.colt.matrix import DoubleMatrix2D 9 | from cern.colt.matrix.DoubleFactory2D import dense,sparse; 10 | from cern.colt.matrix.impl import DenseDoubleMatrix2D as ndarray 11 | from cern.colt.matrix.impl import SparseDoubleMatrix2D as sdarray 12 | from cern.colt.matrix.linalg import Algebra; 13 | from org.python.core.exceptions import ValueError as PyValueException; 14 | from org.python.core import PyString,PySlice,PySequence,PyList; 15 | 16 | class Matrix(javax.swing.table.AbstractTableModel): 17 | _M = None; 18 | _name = 'data matrix' 19 | varname = '' 20 | column_names = [] 21 | row_names = [] 22 | 23 | def __init__(self,M=None,N=None,v=None,sparse=None): 24 | 25 | """ 26 | M is the number of rows 27 | N is the number of columns 28 | v is the default value 29 | """ 30 | if isinstance(M,DoubleMatrix2D): 31 | self._M = M.copy(); 32 | elif (isinstance(M,int) and isinstance(N,int)): 33 | if sparse: 34 | F = sparse 35 | else: 36 | F = dense; 37 | if v is None: 38 | self._M = F.make(M,N,0); 39 | elif isinstance(v,int): 40 | self._M = F.make(M,N,v); 41 | elif isinstance(v,PyList): 42 | self._M = F.make(jarray.array(v,'d'),1) 43 | elif isinstance(v,PyString): 44 | self._M = F.random(M,N); 45 | else: 46 | if sparse: 47 | self._M = SparseDoubleMatrix2D(v) 48 | else: 49 | self._M = ndarray(v) 50 | self.shape = (self._M.rows(),self._M.columns()) 51 | 52 | 53 | def __copy__(self): 54 | r = new.instance(self.__class__, self.__dict__.copy() ) 55 | r._M = self._M.copy(); 56 | print "in copy" 57 | return r 58 | 59 | def __sub__(A,B): 60 | 61 | [ar,ac]=size(A); 62 | C = Matrix(ar,ac,0); 63 | for i in range(1,ar): 64 | for j in range(1,ac): 65 | C[i,j]=A[i,j]-B[i,j] 66 | return C; 67 | 68 | def __mul__(A,B): 69 | 70 | # need to check types and multiple based on them.. 71 | try: 72 | F = Algebra(); 73 | C=(F.mult(A._M,B._M)); 74 | except: 75 | raise PyValueException, "Inner dimension mismatch in matrix multiply."; 76 | return None; 77 | return Matrix(C) 78 | def __div__(A,B): 79 | #print size(A) 80 | 81 | try: 82 | F = Algebra(); 83 | R = F.solve(A._M,B._M); 84 | return R; 85 | except (java.lang.IllegalArgumentException) , e : 86 | # check the error class types according to the matrix class so we can intelligently report the error. 
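            # Colt signals problems such as a dimension mismatch with
            # IllegalArgumentException; print the message and fall through to
            # return None.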
87 | print e.getMessage(); 88 | return None; 89 | 90 | 91 | def __repr__(self): 92 | return self._M.toString(); 93 | 94 | def __str__(self): 95 | return self._M.toString(); 96 | 97 | def __sz__(self): 98 | if isinstance(self,Matrix): 99 | x=self._M.rows(); 100 | y=self._M.columns(); 101 | return (x,y); 102 | else: 103 | raise PyValueException, "Argument must be a matrix."; 104 | 105 | def __setitem__(self,idx,v): 106 | 107 | if v is None: 108 | print idx 109 | raise PyValueException, "v is none" 110 | 111 | if isinstance(v,float): 112 | self._M.set(idx[0],idx[1],v); 113 | return; 114 | 115 | Y=idx[1]; 116 | X=idx[0]; 117 | 118 | if isinstance(X,PyList): 119 | X=map(lambda x: x, X) 120 | elif isinstance(X,PySlice): 121 | if X.start == None: 122 | X=range(0,self._M.rows()) 123 | elif isinstance(X,int): 124 | X=[X]; 125 | 126 | if isinstance(Y,PyList): 127 | Y=map(lambda x: x,Y); 128 | elif isinstance(Y,PySlice): 129 | if Y.start == None: 130 | Y=range(0,self._M.cols()) 131 | elif isinstance(Y,int): 132 | Y=[Y]; 133 | 134 | order=0; 135 | if len(X)>len(Y): 136 | order=1; 137 | 138 | #print "the order is " , order 139 | if order==0: 140 | y=1 141 | for q in Y: 142 | x=1 143 | for z in X: 144 | # print z,q,x,y,v 145 | self._M.set(z,q,v[x,y]); 146 | x+=1; 147 | y+=1; 148 | else: 149 | x=1 150 | for z in X: 151 | y=1; 152 | for q in Y: 153 | self._M.set(z,q,v[x,y]); 154 | y+=1; 155 | x+=1; 156 | 157 | 158 | def __getslice__(self, i, j): 159 | 160 | if i.start != None: 161 | x=range(i.start,i.stop); 162 | else: 163 | x=range(0,self._M.rows()) 164 | if j.start !=None: 165 | y=range(j.start,j.stop) 166 | else: 167 | y=range(0,self._M.columns()) 168 | 169 | return Matrix(self._M.viewSelection(x,y)); 170 | 171 | 172 | def __getitem__(self,idx): 173 | x=idx[0]; 174 | y=idx[1]; 175 | if x<0 or y<0: 176 | raise PyValueException, "Index must be positive number"; 177 | # this will fail on pyslice 178 | 179 | if isinstance(x,PySlice): 180 | if x.start != None: 181 | x=range(x.start,x.stop); 182 | else: 183 | x=range(0,self._M.rows()) 184 | elif isinstance(x,int): 185 | x=x; 186 | x=[x]; 187 | elif isinstance(x,PyList): 188 | x=map(lambda x: x, x) 189 | 190 | if isinstance(y,int): 191 | y=y; 192 | y=[y]; 193 | elif isinstance(y,PySlice): 194 | if y.start != None: 195 | y=range(y.start,y.stop); 196 | else: 197 | y=range(0,self._M.columns()) 198 | elif isinstance(y,PySlice): 199 | if y.start !=None: 200 | y=range(y.start,y.stop) 201 | else: 202 | y=range(0,self._M.columns()) 203 | elif isinstance(y,PyList): 204 | y=map(lambda x: x, y) 205 | 206 | 207 | if len(x)<2 and len(y)<2: 208 | r = self._M.getQuick(x[0],y[0]); 209 | return float(r) # this is a specific element 210 | else: 211 | return Matrix(self._M.viewSelection(x,y)); 212 | -------------------------------------------------------------------------------- /cobra/oven/aliebrahim/simphenyIO.py: -------------------------------------------------------------------------------- 1 | from os.path import isfile 2 | import csv 3 | import re 4 | from warnings import warn 5 | 6 | import cobra 7 | 8 | 9 | def export_flux_distribution(model, filepath): 10 | """Export flux distribution to import into Simpheny. 
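    Writes a tab-separated table with the columns "Reaction Number",
    "Flux Value", "Lower Bound" and "Upper Bound", keyed by the SimPheny id
    numbers in simphenyMapping.mapping; reactions without a mapping are
    reported and skipped.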
11 | 12 | Parameters 13 | ---------- 14 | model : cobra.Model 15 | filepath: str 16 | 17 | """ 18 | from simphenyMapping import mapping 19 | outfile = open(filepath, "w") 20 | outcsv = csv.writer(outfile, delimiter="\t", lineterminator="\n") 21 | outcsv.writerow(["Reaction Number", "Flux Value", 22 | "Lower Bound", "Upper Bound"]) 23 | for reaction_name, reaction_flux in model.solution.x_dict.iteritems(): 24 | reaction = model.reactions.get_by_id(reaction_name) 25 | try: 26 | outcsv.writerow([mapping[reaction_name], reaction_flux, 27 | reaction.lower_bound, reaction.upper_bound]) 28 | except KeyError, e: 29 | print "Simpheny id number not found for", e 30 | outfile.close() 31 | 32 | 33 | def _header_count(filename): 34 | """count the number of header lines in a file 35 | The header is defined as over when a line is found which begins 36 | with a number""" 37 | file = open(filename, "r") 38 | for i, line in enumerate(file): 39 | if line[0].isdigit(): 40 | file.close() 41 | return i 42 | file.close() 43 | return False 44 | 45 | 46 | def _open_and_skip_header(filename): 47 | """returns a csv file with the header skipped""" 48 | count = _header_count(filename) 49 | if not count: 50 | raise (IOError, "%s corrupted" % filename) 51 | file = open(filename, "r") 52 | for i in range(count): 53 | file.readline() 54 | return csv.reader(file, delimiter="\t") 55 | 56 | 57 | def _find_metabolites_by_base(base, metabolites): 58 | """search for all metabolites in the list which match the base name. 59 | For example, "h2o" will identify both "h2o(c)" and "h2o(e)" """ 60 | search = re.compile(base + "\([a-z]\)") 61 | found = [] 62 | for the_metabolite in metabolites: 63 | if search.match(the_metabolite.id) is not None: 64 | found.append(the_metabolite) 65 | return found 66 | 67 | 68 | def read_simpheny(baseName, min_lower_bound=-1000, max_upper_bound=1000, 69 | maximize_info=True): 70 | r"""Imports files exported from a SimPheny simulation as a cobra model. 71 | 72 | .. warning:: Use with caution. This is a legacy import function, and 73 | errors have been observed in the converted gene-reaction rules. 74 | 75 | Parameters 76 | ---------- 77 | baseName : str 78 | The filepath to the exported SimPheny files without any of the 79 | extensions. On Windows, it helps if baseName is a raw string 80 | (i.e. r"Path\\to\\files") 81 | min_lower_bound, max_upper_bound : float or int, optional 82 | The bounds on the lower and upper bounds of fluxes in model. 83 | maximize_info : bool, optional 84 | An optional boolean keyword argument. If True, then an attempt 85 | will be made to parse the gpr and metabolite info files, and the 86 | function will take a little bit longer. 
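        The extra information is read from baseName + "_cmpd.txt" and
        baseName + "_gpr.txt" when those files exist; the core
        baseName + ".met", ".rxn" and ".sto" files are always required.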
87 | 88 | Returns 89 | ------- 90 | model : cobra.Model 91 | the imported simpheny model 92 | 93 | """ 94 | 95 | # check to make sure the files can be read 96 | if not(isfile(baseName + ".met") 97 | and isfile(baseName + ".rxn") 98 | and isfile(baseName + ".sto")): 99 | # try again with modifying the baseName 100 | baseName = baseName.encode("string-escape") 101 | if not(isfile(baseName + ".met") 102 | and isfile(baseName + ".rxn") 103 | and isfile(baseName + ".sto")): 104 | raise (IOError, "Input file(s) not found") 105 | model = cobra.Model("SimPheny import from " + baseName) 106 | # read in metabolite file 107 | metfile = _open_and_skip_header(baseName + ".met") 108 | metabolites = [] 109 | for line in metfile: 110 | if len(line) == 0: 111 | break 112 | metabolite = cobra.Metabolite(id=line[1], name=line[2], 113 | compartment=line[3]) 114 | if maximize_info: 115 | compartment_search = re.findall("\([a-z]\)$", metabolite.id) 116 | if compartment_search != []: 117 | metabolite.compartment = compartment_search[0][1] 118 | model.compartments[metabolite.compartment] = line[3] 119 | metabolites.append(metabolite) 120 | model.add_metabolites(metabolites) 121 | # scalefunc will limit the maximum and minumum fluxes 122 | scalefunc = lambda x: max(min(max_upper_bound, x), min_lower_bound) 123 | # read in reaction file 124 | reaction_file = _open_and_skip_header(baseName + ".rxn") 125 | reactions = [] 126 | for line in reaction_file: 127 | if len(line) == 0: 128 | break 129 | the_reaction = cobra.Reaction() 130 | the_reaction.id = line[1] 131 | the_reaction.name = line[2] 132 | if line[3].lower() == "reversible": 133 | the_reaction.reversibility = 1 134 | elif line[3].lower() == "irreversible": 135 | the_reaction.reversibility = 0 136 | the_reaction.lower_bound = scalefunc(float(line[4])) 137 | the_reaction.upper_bound = scalefunc(float(line[5])) 138 | the_reaction.objective_coefficient = float(line[6]) 139 | reactions.append(the_reaction) 140 | model.add_reactions(reactions) 141 | # read in S matrix 142 | Sfile = _open_and_skip_header(baseName + ".sto") 143 | S = [] 144 | for i, line in enumerate(Sfile): 145 | if len(line) == 0: 146 | break 147 | the_metabolite = metabolites[i] 148 | for j, ns in enumerate(line): 149 | n = float(ns) 150 | if n != 0: 151 | model.reactions[j].add_metabolites({the_metabolite: n}) 152 | # attempt to read in more data 153 | infofilepath = baseName + "_cmpd.txt" 154 | if maximize_info and isfile(infofilepath): 155 | infofile = open(infofilepath, "r") 156 | infofile.readline() # skip the header 157 | infocsv = csv.reader(infofile) 158 | for row in infocsv: 159 | found = _find_metabolites_by_base(row[0], model.metabolites) 160 | for found_metabolite in found: 161 | found_metabolite.formula = row[2] 162 | found_metabolite.parse_composition() 163 | found_metabolite.charge = row[4] 164 | found_metabolite.notes = {} 165 | found_metabolite.notes["KEGG_id"] = row[8] 166 | found_metabolite.notes["CAS"] = row[5] 167 | found_metaboltie.notes["review status"] = row[3] 168 | infofile.close() 169 | gpr_filepath = baseName + "_gpr.txt" 170 | if maximize_info and isfile(gpr_filepath): 171 | warn("SimPheny export files may have errors in the gpr.") 172 | # Using this may be risky 173 | gpr_file = open(gpr_filepath, "r") 174 | gpr_file.readline() # skip the header 175 | gpr_csv = csv.reader(gpr_file) 176 | for row in gpr_csv: 177 | the_reaction = model.reactions[model.reactions.index(row[0])] 178 | the_reaction.gene_reaction_rule = row[5] 179 | the_reaction.parse_gene_association() 180 
| gpr_file.close() 181 | # model.update() 182 | return model 183 | 184 | 185 | -------------------------------------------------------------------------------- /cobra/test/data/salmonella.media: -------------------------------------------------------------------------------- 1 | ccollections 2 | defaultdict 3 | p1 4 | (c__builtin__ 5 | dict 6 | p2 7 | tRp3 8 | S'LB' 9 | p4 10 | (dp5 11 | S'EX_ser__L_e' 12 | p6 13 | F-5 14 | sS'EX_dcyt_e' 15 | p7 16 | F-5 17 | sS'EX_hg2_e' 18 | p8 19 | F-1000 20 | sS'EX_ins_e' 21 | p9 22 | F-5 23 | sS'EX_cd2_e' 24 | p10 25 | F-1000 26 | sS'EX_nac_e' 27 | p11 28 | F-5 29 | sS'EX_tungs_e' 30 | p12 31 | F-1000 32 | sS'EX_glu__L_e' 33 | p13 34 | F-5 35 | sS'EX_trp__L_e' 36 | p14 37 | F-5 38 | sS'EX_h_e' 39 | p15 40 | F-100 41 | sS'EX_mobd_e' 42 | p16 43 | F-1000 44 | sS'EX_val__L_e' 45 | p17 46 | F-5 47 | sS'EX_cobalt2_e' 48 | p18 49 | F-1000 50 | sS'EX_so4_e' 51 | p19 52 | F-1000 53 | sS'EX_co2_e' 54 | p20 55 | F-1000 56 | sS'EX_k_e' 57 | p21 58 | F-1000 59 | sS'EX_cu2_e' 60 | p22 61 | F-1000 62 | sS'EX_zn2_e' 63 | p23 64 | F-1000 65 | sS'EX_na1_e' 66 | p24 67 | F-1000 68 | sS'EX_cl_e' 69 | p25 70 | F-1000 71 | sS'EX_leu__L_e' 72 | p26 73 | F-5 74 | sS'EX_arg__L_e' 75 | p27 76 | F-5 77 | sS'EX_pnto__R_e' 78 | p28 79 | F-5 80 | sS'EX_lys__L_e' 81 | p29 82 | F-5 83 | sS'EX_ala__L_e' 84 | p30 85 | F-5 86 | sS'EX_cbl1_e' 87 | p31 88 | F-0.01 89 | sS'EX_thr__L_e' 90 | p32 91 | F-5 92 | sS'EX_fe3_e' 93 | p33 94 | F-1000 95 | sS'EX_adn_e' 96 | p34 97 | F-5 98 | sS'EX_pi_e' 99 | p35 100 | F-1000 101 | sS'EX_thymd_e' 102 | p36 103 | F-5 104 | sS'EX_mn2_e' 105 | p37 106 | F-1000 107 | sS'EX_phe__L_e' 108 | p38 109 | F-5 110 | sS'EX_ura_e' 111 | p39 112 | F-5 113 | sS'EX_dad__2_e' 114 | p40 115 | F-5 116 | sS'EX_h2o_e' 117 | p41 118 | F-100 119 | sS'EX_aso3_e' 120 | p42 121 | F-1000 122 | sS'EX_hxan_e' 123 | p43 124 | F-5 125 | sS'EX_glc__D_e' 126 | p44 127 | F-5 128 | sS'EX_uri_e' 129 | p45 130 | F-5 131 | sS'EX_his__L_e' 132 | p46 133 | F-5 134 | sS'EX_o2_e' 135 | p47 136 | F-18.5 137 | sS'EX_pro__L_e' 138 | p48 139 | F-5 140 | sS'EX_asp__L_e' 141 | p49 142 | F-5 143 | sS'EX_gly_e' 144 | p50 145 | F-5 146 | sS'EX_fe2_e' 147 | p51 148 | F-1000 149 | sS'EX_ca2_e' 150 | p52 151 | F-1000 152 | sS'EX_mg2_e' 153 | p53 154 | F-1000 155 | sS'EX_cysi__L_e' 156 | p54 157 | F-5 158 | sS'EX_tyr__L_e' 159 | p55 160 | F-5 161 | sS'EX_met__L_e' 162 | p56 163 | F-5 164 | sS'EX_ile__L_e' 165 | p57 166 | F-5 167 | ssS'Host' 168 | p58 169 | (dp59 170 | g7 171 | F-5 172 | sS'EX_melib_e' 173 | p60 174 | F-5 175 | sg9 176 | F-5 177 | sg18 178 | F-10 179 | sS'EX_dmso_e' 180 | p61 181 | F-1000 182 | sS'EX_acnam_e' 183 | p62 184 | F-5 185 | sS'EX_thm_e' 186 | p63 187 | F-5 188 | sS'EX_glcn_e' 189 | p64 190 | F-5 191 | sg45 192 | F-5 193 | sS'EX_rib__D_e' 194 | p65 195 | F-5 196 | sg12 197 | F-10 198 | sS'EX_malt_e' 199 | p66 200 | F-5 201 | sg13 202 | F-5 203 | sS'EX_arab__L_e' 204 | p67 205 | F-5 206 | sS'EX_12ppd__S_e' 207 | p68 208 | F-5 209 | sg15 210 | F-100 211 | sg16 212 | F-10 213 | sS'EX_mnl_e' 214 | p69 215 | F-5 216 | sg53 217 | F-10 218 | sg19 219 | F-1000 220 | sg20 221 | F-1000 222 | sS'EX_glcr_e' 223 | p70 224 | F-5 225 | sg21 226 | F-10 227 | sg22 228 | F-10 229 | sg24 230 | F-1000 231 | sg31 232 | F-0.01 233 | sg25 234 | F-1000 235 | sS'EX_sbt__D_e' 236 | p71 237 | F-5 238 | sg27 239 | F-5 240 | sg28 241 | F-5 242 | sg29 243 | F-5 244 | sS'EX_fuc__L_e' 245 | p72 246 | F-5 247 | sg30 248 | F-5 249 | sS'EX_csn_e' 250 | p73 251 | F-5 252 | sS'EX_gal_e' 253 | p74 254 | F-5 255 | 
sS'EX_crn_e' 256 | p75 257 | F-5 258 | sS'EX_no3_e' 259 | p76 260 | F-1000 261 | sg32 262 | F-5 263 | sg33 264 | F-10 265 | sg34 266 | F-5 267 | sg35 268 | F-1000 269 | sS'EX_glcur_e' 270 | p77 271 | F-5 272 | sg36 273 | F-5 274 | sg37 275 | F-10 276 | sS'EX_etha_e' 277 | p78 278 | F-5 279 | sS'EX_galt_e' 280 | p79 281 | F-5 282 | sg39 283 | F-5 284 | sg40 285 | F-5 286 | sS'EX_gsn_e' 287 | p80 288 | F-5 289 | sg41 290 | F-100 291 | sg43 292 | F-5 293 | sg44 294 | F-5 295 | sS'EX_galct__D_e' 296 | p81 297 | F-5 298 | sg46 299 | F-5 300 | sg56 301 | F-5 302 | sg47 303 | F-18.5 304 | sS'EX_asn__L_e' 305 | p82 306 | F-5 307 | sg48 308 | F-5 309 | sS'EX_acgam_e' 310 | p83 311 | F-5 312 | sS'EX_so3_e' 313 | p84 314 | F-1000 315 | sS'EX_cys__L_e' 316 | p85 317 | F-5 318 | sS'EX_rmn_e' 319 | p86 320 | F-5 321 | sg52 322 | F-10 323 | sg17 324 | F-5 325 | sS'EX_man_e' 326 | p87 327 | F-5 328 | sg23 329 | F-10 330 | sg26 331 | F-5 332 | sS'EX_fru_e' 333 | p88 334 | F-5 335 | sg51 336 | F-10 337 | sS'EX_alltn_e' 338 | p89 339 | F-5 340 | sS'EX_galctn__D_e' 341 | p90 342 | F-5 343 | sg57 344 | F-5 345 | ssS'M9' 346 | p91 347 | (dp92 348 | g18 349 | F-1000 350 | sg12 351 | F-1000 352 | sg15 353 | F-100 354 | sg16 355 | F-1000 356 | sg53 357 | F-1000 358 | sg19 359 | F-1000 360 | sg20 361 | F-1000 362 | sg21 363 | F-1000 364 | sg33 365 | F-1000 366 | sg24 367 | F-1000 368 | sg25 369 | F-1000 370 | sg22 371 | F-1000 372 | sS'EX_nh4_e' 373 | p93 374 | F-1000 375 | sg51 376 | F-1000 377 | sg31 378 | F-0.01 379 | sg35 380 | F-1000 381 | sg37 382 | F-1000 383 | sg41 384 | F-100 385 | sg44 386 | F-5 387 | sg47 388 | F-18.5 389 | sg52 390 | F-1000 391 | sg23 392 | F-1000 393 | ssS'LPM' 394 | p94 395 | (dp95 396 | S'EX_ca2_e' 397 | p96 398 | F-0.0050000000000000001 399 | sS'EX_cit_e' 400 | p97 401 | F-0.00050000000000000001 402 | sS'EX_h_e' 403 | p98 404 | I-100 405 | sS'EX_mobd_e' 406 | p99 407 | F-0.0050000000000000001 408 | sS'EX_o2_e' 409 | p100 410 | F-18.5 411 | sS'EX_cobalt2_e' 412 | p101 413 | F-0.0050000000000000001 414 | sS'EX_mg2_e' 415 | p102 416 | F-0.0080000000000000002 417 | sS'EX_thm_e' 418 | p103 419 | F-2.9649835443413292e-08 420 | sS'EX_so4_e' 421 | p104 422 | F-1 423 | sS'EX_glyc_e' 424 | p105 425 | F-41.046802041481158 426 | sS'EX_co2_e' 427 | p106 428 | F-18.5 429 | sS'EX_pi_e' 430 | p107 431 | F-0.33700000000000002 432 | sS'EX_k_e' 433 | p108 434 | F-6 435 | sS'EX_cu2_e' 436 | p109 437 | F-0.0050000000000000001 438 | sS'EX_cl_e' 439 | p110 440 | F-5.016 441 | sS'EX_zn2_e' 442 | p111 443 | F-0.0050000000000000001 444 | sS'EX_h2o_e' 445 | p112 446 | I-1000 447 | sS'EX_nh4_e' 448 | p113 449 | F-15 450 | sS'EX_mn2_e' 451 | p114 452 | F-0.0050000000000000001 453 | sS'EX_fe3_e' 454 | p115 455 | F-0.0050000000000000001 456 | ssS'MgM' 457 | p116 458 | (dp117 459 | S'EX_ser__L_e' 460 | p118 461 | F-0.3115 462 | sS'EX_cobalt2_e' 463 | p119 464 | F-0.01 465 | sS'EX_pro__L_e' 466 | p120 467 | F-0.61850000000000005 468 | sS'EX_glu__L_e' 469 | p121 470 | F-0.80200000000000005 471 | sS'EX_glyc_e' 472 | p122 473 | F-38 474 | sS'EX_h_e' 475 | p123 476 | I-100 477 | sS'EX_mobd_e' 478 | p124 479 | F-0.01 480 | sS'EX_val__L_e' 481 | p125 482 | F-0.29999999999999999 483 | sS'EX_so4_e' 484 | p126 485 | F-8 486 | sS'EX_co2_e' 487 | p127 488 | F-18.5 489 | sS'EX_k_e' 490 | p128 491 | F-7.0026855387574392 492 | sS'EX_fe3_e' 493 | p129 494 | F-0.01 495 | sS'EX_na1_e' 496 | p130 497 | F-0.55198392330511803 498 | sS'EX_cl_e' 499 | p131 500 | F-5.6458532974921516 501 | sS'EX_leu__L_e' 502 | p132 503 | F-0.4385 504 | 
sS'EX_arg__L_e' 505 | p133 506 | F-0.1135 507 | sS'EX_nh4_e' 508 | p134 509 | F-0.35449999999999998 510 | sS'EX_lys__L_e' 511 | p135 512 | F-0.32450000000000001 513 | sS'EX_ala__L_e' 514 | p136 515 | F-0.22 516 | sS'EX_thr__L_e' 517 | p137 518 | F-0.20849999999999999 519 | sS'EX_pi_e' 520 | p138 521 | F-1 522 | sS'EX_mn2_e' 523 | p139 524 | F-0.01 525 | sS'EX_phe__L_e' 526 | p140 527 | F-0.045499999999999999 528 | sS'EX_h2o_e' 529 | p141 530 | I-1000 531 | sS'EX_his__L_e' 532 | p142 533 | F-0.097000000000000003 534 | sS'EX_o2_e' 535 | p143 536 | F-18.5 537 | sS'EX_tyr__L_e' 538 | p144 539 | F-0.0035000000000000001 540 | sS'EX_asp__L_e' 541 | p145 542 | F-0.32850000000000001 543 | sS'EX_gly_e' 544 | p146 545 | F-0.1555 546 | sS'EX_cys__L_e' 547 | p147 548 | F-0.014500000000000001 549 | sS'EX_cu2_e' 550 | p148 551 | F-0.01 552 | sS'EX_ca2_e' 553 | p149 554 | F-0.0020709616248315785 555 | sS'EX_mg2_e' 556 | p150 557 | F-0.0080983336761983136 558 | sS'EX_zn2_e' 559 | p151 560 | F-0.01 561 | sS'EX_met__L_e' 562 | p152 563 | F-0.1125 564 | sS'EX_ile__L_e' 565 | p153 566 | F-0.19600000000000001 567 | ss. --------------------------------------------------------------------------------
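
The salmonella.media data above is a protocol-0 pickle of a collections.defaultdict that maps media names ('LB', 'Host', 'M9', 'LPM', 'MgM') to dictionaries of exchange-reaction ids and lower bounds. A minimal sketch of loading it and constraining a model to one medium is shown below; it assumes the working directory is the repository root and uses the pickled Salmonella test model purely for illustration, not as part of the test suite.

import pickle

# load the Salmonella test model and the media definitions shipped with the test data
with open("cobra/test/data/salmonella.pickle", "rb") as infile:
    model = pickle.load(infile)
with open("cobra/test/data/salmonella.media", "rb") as infile:
    media = pickle.load(infile)

# apply the M9 medium by setting the lower bound of each listed exchange reaction
for reaction_id, lower_bound in media["M9"].items():
    model.reactions.get_by_id(reaction_id).lower_bound = lower_bound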