├── .gitignore
├── makedist.sh
├── tractconverter
│   ├── formats
│   │   ├── __init__.py
│   │   ├── header.py
│   │   ├── fib.py
│   │   ├── tck.py
│   │   ├── trk.py
│   │   └── vtk.py
│   ├── __init__.py
│   ├── utils.py
│   └── info.py
├── MANIFEST.in
├── .gitmodules
├── .travis.yml
├── README.md
├── scripts
│   ├── TractInfo.py
│   ├── tidy_vtk.py
│   ├── TractConverter.py
│   ├── TractMerger.py
│   └── WalkingTractConverter.py
├── LICENSE
├── setup.py
└── distribute_setup.py

/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.sublime-*
--------------------------------------------------------------------------------
/makedist.sh:
--------------------------------------------------------------------------------
1 | python setup.py build sdist
--------------------------------------------------------------------------------
/tractconverter/formats/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.txt
2 | recursive-include docs *.txt
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "tests"]
2 | 	path = tests
3 | 	url = git://github.com/MarcCote/tractconverter_test.git
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 |   # - "3.3"
4 |   - "2.7"
5 |   # - "pypy"
6 | # command to install dependencies, e.g. pip install -r requirements.txt --use-mirrors
7 | install:
8 |   - "pip install nibabel"
9 | # command to run tests, e.g. python setup.py test
10 | script: nosetests -v
--------------------------------------------------------------------------------
/tractconverter/__init__.py:
--------------------------------------------------------------------------------
1 | from tractconverter.utils import convert
2 | from tractconverter.utils import merge
3 | from tractconverter.utils import is_supported
4 | from tractconverter.utils import detect_format
5 | from tractconverter.utils import FORMATS
6 | from tractconverter.utils import EXT_ANAT
7 | 
8 | from tractconverter.formats.tck import TCK
9 | from tractconverter.formats.trk import TRK
10 | from tractconverter.formats.vtk import VTK
11 | from tractconverter.formats.fib import FIB
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | TractConverter (deprecated)
2 | ===========================
3 | 
4 | **TractConverter is now deprecated; please use the Nibabel streamlines API instead (e.g. `nibabel.streamlines.load()`).**
5 | 
6 | TractConverter is a Python toolbox to convert tractogram files.
7 | 
8 | TractConverter is for research only; please do not use results
9 | from TractConverter on clinical data.
10 | 
11 | 
12 | Installation
13 | ============
14 | 
15 | ``sudo pip install http://github.com/MarcCote/tractconverter/archive/master.zip``
16 | 
17 | Code
18 | ====
19 | [![Build Status](https://travis-ci.org/MarcCote/tractconverter.png)](https://travis-ci.org/MarcCote/tractconverter)
20 | 
21 | You can find our sources:
22 | 
23 | * [Main repository](https://github.com/MarcCote/tractconverter) on Github.
24 | * Download the [current trunk](https://github.com/MarcCote/tractconverter/archive/master.zip) as a tar/zip file.
25 | * Download the [latest release](https://github.com/MarcCote/tractconverter/releases/tag/v0.8).
26 | 
27 | License
28 | =======
29 | 
30 | TractConverter is licensed under the terms of the BSD license. Some code included with
31 | TractConverter is also licensed under the BSD license. Please see the LICENSE file in the
32 | TractConverter distribution.
33 | 
--------------------------------------------------------------------------------
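Since the README deprecates this toolbox in favor of Nibabel, here is a minimal sketch of the suggested replacement using the `nibabel.streamlines` API (file names are placeholders):

```python
import nibabel as nib

# The input format (TRK, TCK, ...) is detected automatically from the file.
tractogram_file = nib.streamlines.load('bundle.trk')

# The output format is chosen from the extension of the target file name.
nib.streamlines.save(tractogram_file.tractogram, 'bundle.tck')
```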
/tractconverter/formats/header.py:
--------------------------------------------------------------------------------
1 | '''
2 | Created on 2012-02-22
3 | 
4 | @author: coteharn
5 | '''
6 | import nibabel as nib
7 | import numpy as np
8 | 
9 | 
10 | class Header:
11 |     NB_FIBERS = 0
12 |     STEP = 1
13 |     METHOD = 2
14 |     NB_SCALARS_BY_POINT = 3
15 |     NB_PROPERTIES_BY_TRACT = 4
16 |     NB_POINTS = 5
17 |     VOXEL_SIZES = 6
18 |     DIMENSIONS = 7
19 |     MAGIC_NUMBER = 8
20 |     ORIGIN = 9
21 |     VOXEL_TO_WORLD = 10
22 |     VOXEL_ORDER = 11
23 |     WORLD_ORDER = 12
24 |     ENDIAN = 13
25 | 
26 | 
27 | def get_header_from_anat(anat_file, hdr=None):
28 |     # Use None instead of a mutable default argument ({}), which would be
29 |     # shared (and mutated) across calls.
30 |     if hdr is None:
31 |         hdr = {}
32 | 
33 |     if anat_file is None:
34 |         if len(hdr) == 0:
35 |             # Defaults
36 |             hdr[Header.VOXEL_SIZES] = (0, 0, 0)
37 |             hdr[Header.DIMENSIONS] = (1, 1, 1)
38 | 
39 |         return hdr
40 | 
41 |     anat = nib.load(anat_file)
42 | 
43 |     hdr[Header.VOXEL_SIZES] = tuple(anat.get_header().get_zooms())[:3]
44 |     hdr[Header.DIMENSIONS] = tuple(anat.get_header().get_data_shape())[:3]
45 |     hdr[Header.VOXEL_TO_WORLD] = anat.get_header().get_best_affine()
46 | 
47 |     # We can guess the voxel order from the affine if there is no 0 on the diagonal.
48 |     if not np.any(np.diag(hdr[Header.VOXEL_TO_WORLD]) == 0):
49 |         hdr[Header.VOXEL_ORDER] = ''.join(nib.aff2axcodes(hdr[Header.VOXEL_TO_WORLD]))
50 | 
51 |     return hdr
--------------------------------------------------------------------------------
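As a usage sketch for the module above ('anat.nii.gz' is a placeholder), the header dict keyed by the `Header` constants is what the format classes consume:

```python
from tractconverter.formats.header import Header, get_header_from_anat

hdr = get_header_from_anat('anat.nii.gz')
print hdr[Header.VOXEL_SIZES]     # e.g. (2.0, 2.0, 2.0), from the NIfTI zooms
print hdr[Header.DIMENSIONS]      # e.g. (96, 96, 60), from the data shape
print hdr[Header.VOXEL_TO_WORLD]  # the 4x4 best affine of the anatomy
```

Note that `Header.VOXEL_ORDER` is only filled in when the affine has no zero on its diagonal.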
/scripts/TractInfo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import argparse
4 | import logging
5 | import os
6 | import tractconverter.info as info
7 | 
8 | import tractconverter
9 | from tractconverter import FORMATS
10 | from tractconverter import EXT_ANAT
11 | 
12 | # Script description
13 | DESCRIPTION = """
14 | TractInfo {0}.
15 | Print info about a streamlines file.
16 | Supported formats are {1}
17 | """.format(info.__version__,
18 |            ",".join(FORMATS.keys()))
19 | 
20 | 
21 | #####
22 | # Script part
23 | ###
24 | def buildArgsParser():
25 |     p = argparse.ArgumentParser(description=DESCRIPTION)
26 |     p.add_argument('-i', action='store', dest='input',
27 |                    metavar='FILE', required=True,
28 |                    help='input track file ({0})'.format(",".join(FORMATS.keys())))
29 |     return p
30 | 
31 | 
32 | def main():
33 |     parser = buildArgsParser()
34 |     args = parser.parse_args()
35 | 
36 |     in_filename = args.input
37 | 
38 |     if not os.path.isfile(in_filename):
39 |         parser.error('"{0}" must be an existing file!'.format(in_filename))
40 | 
41 |     if not tractconverter.is_supported(in_filename):
42 |         parser.error('Input file must be one of {0}!'.format(",".join(FORMATS.keys())))
43 | 
44 |     inFormat = tractconverter.detect_format(in_filename)
45 | 
46 |     #Print info about the input file.
47 |     print inFormat(in_filename, None)
48 | 
49 | if __name__ == "__main__":
50 |     main()
51 | 
--------------------------------------------------------------------------------
/scripts/tidy_vtk.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | 
4 | import os
5 | import argparse
6 | 
7 | from itertools import islice
8 | 
9 | 
10 | def take(n, iterable):
11 |     "Return first n items of the iterable as a list"
12 |     return list(islice(iterable, n))
13 | 
14 | 
15 | def parse_arguments():
16 |     parser = argparse.ArgumentParser()
17 |     parser.add_argument('input_vtk', type=str, help='VTK file to beautify.')
18 |     parser.add_argument('output_vtk', type=str, help="Name of the beautified VTK file.")
19 |     args = parser.parse_args()
20 | 
21 |     # Check for invalid arguments
22 |     if not os.path.isfile(args.input_vtk):
23 |         parser.error("Invalid file path. Specify path to a file.")
24 | 
25 |     if os.path.isfile(args.output_vtk):
26 |         parser.error("Output file already exists, will not overwrite.")
27 | 
28 |     return args
29 | 
30 | 
31 | def main():
32 |     args = parse_arguments()
33 | 
34 |     lines = list(open(args.input_vtk))
35 | 
36 |     for i, l in enumerate(lines):
37 |         if l.startswith('LINES'):
38 |             break
39 | 
40 |     i += 1  # We need to format the line after section LINES.
41 |     it = iter(lines[i].split())
42 | 
43 |     formatted_lines = []
44 |     try:
45 |         while True:
46 |             nb_points = next(it)
47 |             formatted_lines.append(" ".join([nb_points] + take(int(nb_points), it)) + "\n")
48 |     except StopIteration:
49 |         pass
50 | 
51 |     lines = lines[:i] + formatted_lines + lines[i+1:]
52 | 
53 |     open(args.output_vtk, 'w').write("".join(lines))
54 | 
55 | if __name__ == '__main__':
56 |     main()
57 | 
--------------------------------------------------------------------------------
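The reflow in tidy_vtk.py relies on the ASCII VTK `LINES` payload being a flat sequence in which every polyline record starts with its own point count. A small self-contained sketch of that chunking, on made-up numbers:

```python
from itertools import islice

# Flat LINES payload, as found after the 'LINES n size' marker:
# [count, idx, ..., count, idx, ...]
it = iter("3 0 1 2 2 3 4".split())

for nb_points in it:
    indices = list(islice(it, int(nb_points)))
    print " ".join([nb_points] + indices)  # -> "3 0 1 2", then "2 3 4"
```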
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2009-2010, SCIL
2 | All rights reserved.
3 | 
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are
6 | met:
7 | 
8 |     * Redistributions of source code must retain the above copyright
9 |       notice, this list of conditions and the following disclaimer.
10 | 
11 |     * Redistributions in binary form must reproduce the above
12 |       copyright notice, this list of conditions and the following
13 |       disclaimer in the documentation and/or other materials provided
14 |       with the distribution.
15 | 
16 |     * Neither the name of the tractconverter developers nor the names of any
17 |       contributors may be used to endorse or promote products derived
18 |       from this software without specific prior written permission.
19 | 
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 | 
--------------------------------------------------------------------------------
/tractconverter/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 | 
4 | from tractconverter.formats.tck import TCK
5 | from tractconverter.formats.trk import TRK
6 | from tractconverter.formats.fib import FIB
7 | from tractconverter.formats.vtk import VTK
8 | 
9 | # Supported formats.
10 | FORMATS = {"tck": TCK,
11 |            "trk": TRK,
12 |            "fib": FIB,
13 |            "vtk": VTK}
14 | 
15 | # Anatomy file extensions.
16 | EXT_ANAT = ".nii|.nii.gz"
17 | 
18 | 
19 | def is_supported(filename):
20 |     return detect_format(filename) is not None
21 | 
22 | 
23 | def detect_format(filename):
24 |     # For a file that does not exist yet (e.g. an output file), fall back
25 |     # to the 3-letter extension; otherwise sniff the actual content.
26 |     if not os.path.isfile(filename):
27 |         return FORMATS.get(filename[-3:], None)
28 | 
29 |     for format in FORMATS.values():
30 |         if format._check(filename):
31 |             return format
32 | 
33 |     return None
34 | 
35 | 
36 | def convert(input, output, verbose=False, keep_open=False):
37 |     from tractconverter.formats.header import Header
38 | 
39 |     nbFibers = 0
40 |     fibers = []
41 | 
42 |     display_threshold = 10000 if input.hdr[Header.NB_FIBERS] > 100000 else 1000
43 | 
44 |     for i, f in enumerate(input):
45 |         fibers.append(f)
46 |         # Flush to the output file in batches of 1000 streamlines.
47 |         if (i + 1) % 1000 == 0:
48 |             output += fibers
49 |             fibers = []
50 | 
51 |         if i % display_threshold == 0:
52 |             logging.info('(' + str(nbFibers) + "/" + str(input.hdr[Header.NB_FIBERS]) + ' fibers)')
53 | 
54 |         nbFibers += 1
55 | 
56 |     if len(fibers) > 0:
57 |         output += fibers
58 | 
59 |     if not keep_open:
60 |         output.close()
61 | 
62 |     logging.info('Done! (' + str(nbFibers) + "/" + str(input.hdr[Header.NB_FIBERS]) + ' fibers)')
63 |     return nbFibers
64 | 
65 | 
66 | def merge(inputs, output, verbose=False):
67 |     nb_streamlines = 0
68 |     for input in inputs:
69 |         nb_streamlines += convert(input, output, verbose=verbose, keep_open=True)
70 | 
71 |     # Finalize the output file (a no-op for some formats).
72 |     output.close()
73 | 
74 |     logging.info('Done! (' + str(nb_streamlines) + " streamlines merged.)")
75 | 
--------------------------------------------------------------------------------
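Putting `detect_format` and `convert` together, a minimal programmatic conversion looks like the sketch below (file names are placeholders; the anatomy is only required by formats that need voxel information):

```python
import tractconverter

in_file, out_file, anat_file = 'bundle.trk', 'bundle.fib', 'anat.nii.gz'

# An existing file is sniffed by content; a not-yet-created output file
# falls back to its 3-letter extension.
in_format = tractconverter.detect_format(in_file)
out_format = tractconverter.detect_format(out_file)

input = in_format(in_file, anat_file)
output = out_format.create(out_file, input.hdr, anat_file)
tractconverter.convert(input, output)
```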
/tractconverter/info.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | 
3 | """ This file defines parameters for tractconverter that we use to fill
4 | settings in setup.py, the tractconverter top-level docstring, and for building the
5 | docs. In setup.py in particular, we exec this file, so it cannot import tractconverter.
6 | """
7 | 
8 | # tractconverter version information. An empty _version_extra corresponds to a
9 | # full release. '.dev' as a _version_extra string means this is a development
10 | # version.
11 | _version_major = 0
12 | _version_minor = 8
13 | _version_micro = 1
14 | #_version_extra = '.dev'
15 | _version_extra = ''
16 | 
17 | # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
18 | __version__ = "%s.%s.%s%s" % (_version_major,
19 |                               _version_minor,
20 |                               _version_micro,
21 |                               _version_extra)
22 | 
23 | CLASSIFIERS = ["Development Status :: 3 - Alpha",
24 |                "Environment :: Console",
25 |                "Intended Audience :: Science/Research",
26 |                "License :: OSI Approved :: BSD License",
27 |                "Operating System :: OS Independent",
28 |                "Programming Language :: Python",
29 |                "Topic :: Scientific/Engineering"]
30 | 
31 | description = 'Tractogram converter in python'
32 | 
33 | # Note: this long_description is actually a copy/paste from the top-level
34 | # README.md, so that it shows up nicely on PyPI. So please remember to edit
35 | # it only in one place and sync it correctly.
36 | long_description = """
37 | ================
38 | TractConverter
39 | ================
40 | 
41 | TractConverter is a Python toolbox to convert tractogram files.
42 | 
43 | TractConverter is for research only; please do not use results
44 | from TractConverter on clinical data.
45 | 
46 | Website
47 | =======
48 | 
49 | N/A
50 | 
51 | Mailing Lists
52 | =============
53 | 
54 | N/A
55 | 
56 | Code
57 | ====
58 | 
59 | You can find our sources and single-click downloads:
60 | 
61 | * `Main repository`_ on Github.
62 | * Documentation_ for all releases and current development tree.
63 | * Download as a tar/zip file the `current trunk`_.
64 | * Downloads of all `available releases`_.
65 | 
66 | .. _main repository: http://github.com/MarcCote/tractconverter
67 | .. _Documentation: N/A
68 | .. _current trunk: http://github.com/MarcCote/tractconverter/master
69 | .. _available releases: N/A
70 | 
71 | License
72 | =======
73 | 
74 | tractconverter is licensed under the terms of the BSD license. Some code included with
75 | tractconverter is also licensed under the BSD license. Please see the LICENSE file in the
76 | tractconverter distribution.
77 | """
78 | 
79 | # versions for dependencies
80 | NUMPY_MIN_VERSION = '1.7'
81 | NIBABEL_MIN_VERSION = '1.0.0'
82 | 
83 | # Main setup parameters
84 | NAME = 'tractconverter'
85 | MAINTAINER = "Marc-Alexandre Côté"
86 | MAINTAINER_EMAIL = "marc-alexandre.cote@usherbrooke.ca"
87 | DESCRIPTION = description
88 | LONG_DESCRIPTION = long_description
89 | URL = "N/A"
90 | DOWNLOAD_URL = "N/A"
91 | LICENSE = "BSD license"
92 | CLASSIFIERS = CLASSIFIERS
93 | AUTHOR = "SCIL"
94 | AUTHOR_EMAIL = "scil@gmail.com"
95 | PLATFORMS = "OS Independent"
96 | MAJOR = _version_major
97 | MINOR = _version_minor
98 | MICRO = _version_micro
99 | ISRELEASE = _version_extra == ''
100 | VERSION = __version__
101 | PROVIDES = ["tractconverter"]
102 | REQUIRES = ["numpy (>=%s)" % NUMPY_MIN_VERSION,
103 |             "nibabel (>=%s)" % NIBABEL_MIN_VERSION]
--------------------------------------------------------------------------------
/scripts/TractConverter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | Created on 2012-02-10
4 | 
5 | @author: coteharn
6 | '''
7 | import argparse
8 | import logging
9 | import os
10 | import tractconverter.info as info
11 | 
12 | import tractconverter
13 | from tractconverter import FORMATS
14 | from tractconverter import EXT_ANAT
15 | 
16 | # Script description
17 | DESCRIPTION = """
18 | TractConverter {0}.
19 | Convert streamlines files.
20 | Supported formats are {1}
21 | """.format(info.__version__,
22 |            ",".join(FORMATS.keys()))
23 | 
24 | 
25 | #####
26 | # Script part
27 | ###
28 | def buildArgsParser():
29 |     p = argparse.ArgumentParser(description=DESCRIPTION)
30 |     p.add_argument('-i', action='store', dest='input',
31 |                    metavar='FILE', required=True,
32 |                    help='input track file ({0})'.format(",".join(FORMATS.keys())))
33 |     p.add_argument('-o', action='store', dest='output',
34 |                    metavar='FILE', required=True,
35 |                    help='output track file ({0})'.format(",".join(FORMATS.keys())))
36 |     p.add_argument('-a', action='store', dest='anat',
37 |                    metavar='FILE', required=False,
38 |                    help='input anatomy file ({0})'.format(EXT_ANAT))
39 |     p.add_argument('-f', action='store_true', dest='isForce',
40 |                    help='force (pass extension check; overwrite output file)')
41 |     p.add_argument('-v', action='store_true', dest='isVerbose',
42 |                    help='produce verbose output')
43 |     return p
44 | 
45 | 
46 | def main():
47 |     parser = buildArgsParser()
48 |     args = parser.parse_args()
49 | 
50 |     in_filename = args.input
51 |     out_filename = args.output
52 |     anat_filename = args.anat
53 |     isForcing = args.isForce
54 |     isVerbose = args.isVerbose
55 | 
56 |     if isVerbose:
57 |         logging.basicConfig(level=logging.DEBUG)
58 | 
59 |     if not os.path.isfile(in_filename):
60 |         parser.error('"{0}" must be an existing file!'.format(in_filename))
61 | 
62 |     if not tractconverter.is_supported(in_filename):
63 |         parser.error('Input file must be one of {0}!'.format(",".join(FORMATS.keys())))
64 | 
65 |     if not tractconverter.is_supported(out_filename):
66 |         parser.error('Output file must be one of {0}!'.format(",".join(FORMATS.keys())))
67 | 
68 |     if os.path.isfile(out_filename):
69 |         if isForcing:
70 |             if out_filename == in_filename:
71 |                 parser.error('Cannot use the same name for input and output files. Conversion would fail.')
72 |             else:
73 |                 logging.info('Overwriting "{0}".'.format(out_filename))
74 |         else:
75 |             parser.error('"{0}" already exists! Use -f to overwrite it.'.format(out_filename))
76 | 
77 |     inFormat = tractconverter.detect_format(in_filename)
78 |     outFormat = tractconverter.detect_format(out_filename)
79 | 
80 |     #if inFormat == outFormat:
81 |     #    parser.error('Input and output must be from different types!'.format(",".join(FORMATS.keys())))
82 | 
83 |     if anat_filename is not None:
84 |         if not any(map(anat_filename.endswith, EXT_ANAT.split('|'))):
85 |             if isForcing:
86 |                 logging.info('Reading "{0}" as a {1} file.'.format(anat_filename.split("/")[-1], EXT_ANAT))
87 |             else:
88 |                 parser.error('Anatomy file must be one of {0}!'.format(EXT_ANAT))
89 | 
90 |         if not os.path.isfile(anat_filename):
91 |             parser.error('"{0}" must be an existing file!'.format(anat_filename))
92 | 
93 |     #Convert input to output
94 |     input = inFormat(in_filename, anat_filename)
95 |     output = outFormat.create(out_filename, input.hdr, anat_filename)
96 |     tractconverter.convert(input, output)
97 | 
98 | if __name__ == "__main__":
99 |     main()
--------------------------------------------------------------------------------
/scripts/TractMerger.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import argparse
4 | import logging
5 | import os
6 | import copy
7 | import tractconverter.info as info
8 | 
9 | import tractconverter
10 | from tractconverter import FORMATS
11 | from tractconverter import EXT_ANAT
12 | 
13 | from tractconverter.formats.header import Header
14 | 
15 | # Script description
16 | DESCRIPTION = """
17 | TractMerger {0}.
18 | Merge streamlines files.
19 | Supported formats are {1}
20 | """.format(info.__version__,
21 |            ",".join(FORMATS.keys()))
22 | 
23 | 
24 | #####
25 | # Script part
26 | ###
27 | def buildArgsParser():
28 |     p = argparse.ArgumentParser(description=DESCRIPTION)
29 |     p.add_argument('-i', action='store', dest='input', nargs='+',
30 |                    metavar='FILE', required=True,
31 |                    help='input streamlines file ({0})'.format(",".join(FORMATS.keys())))
32 |     p.add_argument('-o', action='store', dest='output',
33 |                    metavar='FILE', required=True,
34 |                    help='merged streamline file ({0})'.format(",".join(FORMATS.keys())))
35 |     # p.add_argument('-a', action='store', dest='anat',
36 |     #                metavar='FILE', required=False,
37 |     #                help='input anatomy file ({0})'.format(EXT_ANAT))
38 |     p.add_argument('-f', action='store_true', dest='isForce',
39 |                    help='force (pass extension check; overwrite output file)')
40 |     p.add_argument('-v', action='store_true', dest='isVerbose',
41 |                    help='produce verbose output')
42 |     return p
43 | 
44 | 
45 | def main():
46 |     parser = buildArgsParser()
47 |     args = parser.parse_args()
48 | 
49 |     in_filenames = args.input
50 |     out_filename = args.output
51 |     #anat_filename = args.anat
52 |     isForcing = args.isForce
53 |     isVerbose = args.isVerbose
54 | 
55 |     if isVerbose:
56 |         logging.basicConfig(level=logging.DEBUG)
57 | 
58 |     for in_filename in in_filenames:
59 |         if not os.path.isfile(in_filename):
60 |             parser.error('"{0}" must be an existing file!'.format(in_filename))
61 | 
62 |         if not tractconverter.is_supported(in_filename):
63 |             parser.error('Input file must be one of {0}!'.format(",".join(FORMATS.keys())))
64 | 
65 |     if not tractconverter.is_supported(out_filename):
66 |         parser.error('Output file must be one of {0}!'.format(",".join(FORMATS.keys())))
67 | 
68 |     if os.path.isfile(out_filename):
69 |         if isForcing:
70 |             if any(in_name == out_filename for in_name in in_filenames):
71 |                 parser.error('Cannot output to a file which is also an input file ({0}).'.format(out_filename))
72 |             else:
73 |                 logging.info('Overwriting "{0}".'.format(out_filename))
74 |         else:
75 |             parser.error('"{0}" already exists! Use -f to overwrite it.'.format(out_filename))
76 | 
77 |     inFormats = [tractconverter.detect_format(in_filename) for in_filename in in_filenames]
78 |     outFormat = tractconverter.detect_format(out_filename)
79 | 
80 |     # if anat_filename is not None:
81 |     #     if not any(map(anat_filename.endswith, EXT_ANAT.split('|'))):
82 |     #         if isForcing:
83 |     #             logging.info('Reading "{0}" as a {1} file.'.format(anat_filename.split("/")[-1], EXT_ANAT))
84 |     #         else:
85 |     #             parser.error('Anatomy file must be one of {0}!'.format(EXT_ANAT))
86 | 
87 |     #     if not os.path.isfile(anat_filename):
88 |     #         parser.error('"{0}" must be an existing file!'.format(anat_filename))
89 | 
90 |     #TODO: Consider streamlines files with different anat/space ?
91 | 
92 |     # Use information from the first streamlines file to create the header.
93 |     hdr = copy.deepcopy(inFormats[0](in_filenames[0]).hdr)
94 |     hdr[Header.NB_FIBERS] = 0  # The actual number of streamlines will be added later (as we add the streamlines).
95 | 
96 |     #Merge inputs to output
97 |     inputs = (in_format(in_filename) for in_filename, in_format in zip(in_filenames, inFormats))
98 |     output = outFormat.create(out_filename, hdr)
99 |     tractconverter.merge(inputs, output)
100 | 
101 | if __name__ == "__main__":
102 |     main()
103 | 
--------------------------------------------------------------------------------
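The same merge can be done programmatically; as in the script above, the first file's header seeds the output and `tractconverter.merge` streams every input into it (file names are placeholders):

```python
import copy

import tractconverter
from tractconverter.formats.header import Header

in_files = ['bundle_a.trk', 'bundle_b.trk']
in_formats = [tractconverter.detect_format(f) for f in in_files]

# Seed the output header with the first input; the fiber count is rebuilt
# as streamlines are appended.
hdr = copy.deepcopy(in_formats[0](in_files[0]).hdr)
hdr[Header.NB_FIBERS] = 0

inputs = (fmt(f) for f, fmt in zip(in_files, in_formats))
out_format = tractconverter.detect_format('merged.trk')
output = out_format.create('merged.trk', hdr)
tractconverter.merge(inputs, output)
```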
/tractconverter/formats/fib.py:
--------------------------------------------------------------------------------
1 | # -*- coding: UTF-8 -*-
2 | 
3 | import copy
4 | import numpy as np
5 | from tractconverter.formats.header import Header as H
6 | from tractconverter.formats.vtk import VTK
7 | 
8 | 
9 | class FIB:
10 |     MAGIC_NUMBER = "fib"  # Not really one...
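A short sketch of writing a FIB file with the class above (file name and coordinates are made up; note that `writeHeader()` stores the fiber count up front, so it must match what is appended):

```python
import numpy as np

from tractconverter.formats.fib import FIB
from tractconverter.formats.header import Header

hdr = FIB.get_empty_header()
hdr[Header.NB_FIBERS] = 2

fib = FIB.create('example.fib', hdr)
# Each streamline is an Nx3 array of (x, y, z) coordinates.
fib += [np.array([[0., 0., 0.], [1., 0., 0.]], dtype='<f4'),
        np.array([[0., 1., 0.], [0., 2., 0.]], dtype='<f4')]
fib.close()
```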
11 |     # self.hdr
12 |     # self.filename
13 | 
14 |     #####
15 |     # Static Methods
16 |     ###
17 |     @staticmethod
18 |     def _check(filename):
19 |         if VTK._check(filename):
20 |             return False
21 | 
22 |         return filename[-3:].lower() == FIB.MAGIC_NUMBER
23 | 
24 |     @staticmethod
25 |     def create(filename, hdr=None, anatFile=None):
26 |         f = open(filename, 'wb')
27 |         f.write(FIB.MAGIC_NUMBER + "\n")
28 |         f.close()
29 | 
30 |         if hdr is None:
31 |             hdr = FIB.get_empty_header()
32 |         else:
33 |             hdr = copy.deepcopy(hdr)
34 | 
35 |         fib = FIB(filename, load=False)
36 |         fib.hdr = hdr
37 |         fib.writeHeader()
38 | 
39 |         return fib
40 | 
41 |     #####
42 |     # Methods
43 |     ###
44 |     def __init__(self, filename, anatFile=None, load=True):
45 |         if not FIB._check(filename):
46 |             raise NameError("Not a FIB file.")
47 | 
48 |         self.filename = filename
49 |         self.hdr = {}
50 |         if load:
51 |             self._load()
52 | 
53 |     def _load(self):
54 |         f = open(self.filename, 'rb')
55 | 
56 |         #####
57 |         # Read header
58 |         ###
59 |         # Skip pseudo "magic number"
60 |         f.readline()
61 | 
62 |         # Skip the next 5 lines
63 |         f.readline()  # 4 min max mean var
64 |         f.readline()  # 1
65 |         f.readline()  # 4 0 0 0 0
66 |         f.readline()  # 4 0 0 0 0
67 |         f.readline()  # 4 0 0 0 0
68 | 
69 |         # Read number of fibers
70 |         self.hdr[H.NB_FIBERS] = int(f.readline().split()[0])
71 |         self.hdr[H.NB_POINTS] = len(f.readlines()) - 2 * self.hdr[H.NB_FIBERS]
72 | 
73 |         f.close()
74 | 
75 |     @classmethod
76 |     def get_empty_header(cls):
77 |         hdr = {}
78 | 
79 |         #Default values
80 |         hdr[H.NB_FIBERS] = 0
81 | 
82 |         return hdr
83 | 
84 |     def writeHeader(self):
85 |         f = open(self.filename, 'wb')
86 | 
87 |         f.write("1 FA\n")
88 |         f.write("4 min max mean var\n")
89 |         f.write("1\n")
90 |         f.write("4 0 0 0 0\n")
91 |         f.write("4 0 0 0 0\n")
92 |         f.write("4 0 0 0 0\n")
93 |         f.write("{0} 0.5\n".format(self.hdr[H.NB_FIBERS]))
94 | 
95 |         f.close()
96 | 
97 |     def close(self):
98 |         pass
99 | 
100 |     #####
101 |     # Append fiber to file
102 |     # TODO: make it really dynamic if possible (like trk and tck).
103 |     ###
104 |     def __iadd__(self, fibers):
105 |         f = open(self.filename, 'ab')
106 | 
107 |         for fib in fibers:
108 |             lines = []
109 |             lines.append("0 {0}".format(len(fib)))
110 |             lines.append("1")
111 |             lines += [" ".join(map(str, pts)) + " 0" for pts in fib]
112 | 
113 |             f.write("\n".join(lines) + "\n")
114 |         f.close()
115 | 
116 |         return self
117 | 
118 |     #####
119 |     # Iterate through fibers from file
120 |     ###
121 |     def __iter__(self):
122 | 
123 |         f = open(self.filename, 'rb')
124 | 
125 |         # Skip header
126 |         for i in range(7):
127 |             f.readline()
128 | 
129 |         for i in range(self.hdr[H.NB_FIBERS]):
130 |             line = f.readline()
131 |             nbBackward, nbForward = map(int, line.split())
132 |             f.readline()  # Skip (unused)
133 |             # nbPoints = nbBackward + nbForward - int(nbBackward > 0 and nbForward > 0)
134 |             pts = []
135 |             for j in range(nbBackward):
136 |                 pts.append(f.readline().split()[:3])
137 | 
138 |             pts = pts[::-1]
139 |             if nbForward > 0 and nbBackward > 0:
140 |                 f.readline()  # Skip redundant points
141 |                 nbForward -= 1
142 | 
143 |             for j in range(nbForward):
144 |                 pts.append(f.readline().split()[:3])
145 | 
146 |             pts = np.array(pts, "<f4")
147 |             yield pts
148 | 
149 |         f.close()
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: UTF-8 -*-
3 | """Installation script for the tractconverter package.
4 | 
5 | This setup script is heavily based on nibabel's, so that build- and
6 | install-time dependency checking behaves the same way in both projects.
7 | """
8 | 
9 | import os
10 | import sys
11 | 
12 | from os.path import join as pjoin, dirname
13 | from glob import glob
14 | 
15 | # Get version and release info, which is all stored in tractconverter/info.py.
16 | ver_file = os.path.join('tractconverter', 'info.py')
17 | execfile(ver_file)
18 | 
19 | # Remove MANIFEST before importing distutils; otherwise distutils may reuse
20 | # a stale one and ship the wrong list of files.
21 | if os.path.exists('MANIFEST'):
22 |     os.remove('MANIFEST')
23 | 
24 | # force_setuptools can be set from a setup_egg.py-style wrapper script to
25 | # force the use of setuptools. Otherwise, only use setuptools for the
26 | # commands that actually need it.
27 | if not 'force_setuptools' in globals():
28 |     if len(set(('develop',
29 |                 'bdist_egg', 'bdist_rpm',
30 |                 'bdist', 'bdist_dumb', 'bdist_wininst',
31 |                 'install_egg_info', 'egg_info',
32 |                 'easy_install',
33 |                 )).intersection(sys.argv)) > 0:
34 |         force_setuptools = True
35 |     else:
36 |         force_setuptools = False
37 | 
38 | if force_setuptools:
39 |     # Try to preempt setuptools monkeypatching of Extension handling when Pyrex
40 |     # is missing. Otherwise the monkeypatched Extension will change .pyx
41 |     # filenames to .c filenames, and we probably don't have the .c files.
42 |     sys.path.insert(0, pjoin(dirname(__file__), 'fake_pyrex'))
43 |     import setuptools
44 | 
45 | # We may just have imported setuptools, or we may have been exec'd from a
46 | # setuptools environment like pip
47 | if 'setuptools' in sys.modules:
48 |     extra_setuptools_args = dict(
49 |         tests_require=['nose'],
50 |         test_suite='nose.collector',
51 |         zip_safe=False,
52 |         extras_require=dict(
53 |             doc=['Sphinx>=1.0'],
54 |             test=['nose>=0.10.1']),
55 |         install_requires=['nibabel>=' + NIBABEL_MIN_VERSION])
56 | 
57 |     # We need setuptools install command because we're going to override it
58 |     # further down. Using distutils install command causes some confusion, due
59 |     # to the Pyrex / setuptools hack above (force_setuptools)
60 |     from setuptools.command import install
61 | else:
62 |     extra_setuptools_args = {}
63 |     from distutils.command import install
64 | 
65 | # Import distutils _after_ potential setuptools import above, and after removing
66 | # MANIFEST
67 | from distutils.core import setup
68 | from distutils.extension import Extension
69 | 
70 | # Define extensions
71 | EXTS = []
72 | 
73 | # Do our own build and install time dependency checking. setup.py gets called in
74 | # many different ways, and may be called just to collect information (egg_info).
75 | # We need to set up tripwires to raise errors when actually doing things, like
76 | # building, rather than unconditionally in the setup.py import or exec
77 | # We may make tripwire versions of build_ext, build_py, install
78 | try:
79 |     from nisext.sexts import package_check, get_comrec_build
80 | except ImportError:  # No nibabel
81 |     msg = ('Need nisext package from nibabel installation'
82 |            ' - please install nibabel first')
83 |     # pybuilder = derror_maker(build_py.build_py, msg)
84 |     # extbuilder = derror_maker(build_ext.build_ext, msg)
85 | # else:  # We have nibabel
86 |     # pybuilder = get_comrec_build('tractconverter')
87 | # Cython is a dependency for building extensions, iff we don't have stamped
88 | # up pyx and c files.
89 | # extbuilder = cyproc_exts(EXTS, CYTHON_MIN_VERSION, 'pyx-stamps')
90 | 
91 | # Installer that checks for install-time dependencies
92 | 
93 | 
94 | class installer(install.install):
95 | 
96 |     def run(self):
97 |         package_check('numpy', NUMPY_MIN_VERSION)
98 |         package_check('nibabel', NIBABEL_MIN_VERSION)
99 |         install.install.run(self)
100 | 
101 | 
102 | cmdclass = dict(
103 |     install=installer)
104 | 
105 | 
106 | def main(**extra_args):
107 |     setup(name=NAME,
108 |           maintainer=MAINTAINER,
109 |           maintainer_email=MAINTAINER_EMAIL,
110 |           description=DESCRIPTION,
111 |           long_description=LONG_DESCRIPTION,
112 |           url=URL,
113 |           download_url=DOWNLOAD_URL,
114 |           license=LICENSE,
115 |           classifiers=CLASSIFIERS,
116 |           author=AUTHOR,
117 |           author_email=AUTHOR_EMAIL,
118 |           platforms=PLATFORMS,
119 |           version=VERSION,
120 |           requires=REQUIRES,
121 |           provides=PROVIDES,
122 |           packages=['tractconverter',
123 |                     'tractconverter.formats'
124 |                     ],
125 |           ext_modules=EXTS,
126 |           package_data={'tractconverter':
127 |                         [pjoin('data', '*')
128 |                          ]},
129 |           data_files=[('share/doc/tractconverter/examples',
130 |                        glob(pjoin('doc', 'examples', '*.py')))],
131 |           scripts=glob(pjoin('scripts', '*')),
132 |           cmdclass=cmdclass,
133 |           **extra_args
134 |           )
135 | 
136 | # simple way to test what setup will do
137 | # python setup.py install --prefix=/tmp
138 | if __name__ == "__main__":
139 |     main(**extra_setuptools_args)
--------------------------------------------------------------------------------
/scripts/WalkingTractConverter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | '''
3 | Created on 2012-02-10
4 | 
5 | @author: coteharn
6 | '''
7 | import os
8 | import os.path as path
9 | 
10 | import tractconverter
11 | import argparse
12 | import logging
13 | 
14 | from tractconverter import FORMATS
15 | from tractconverter import EXT_ANAT
16 | 
17 | 
18 | def walkAndConvert(p_input, p_conversions, p_output=None, p_anatFile=None, p_isRecursive=False, p_overwrite=False):
19 | 
20 |     for root, dirs, allFiles in os.walk(p_input):
21 |         logging.info('Processing "{0}"...'.format(root))
22 |         root = root + "/"
23 |         nbFiles = 0
24 |         for k, v in p_conversions.items():
25 |             #files = [f for f in allFiles if FORMATS[k]._check(root + f)]
26 |             for i, f in enumerate(allFiles):
27 |                 logging.info('{0}/{1} files'.format(i, len(allFiles)))
28 | 
29 |                 if not FORMATS[k]._check(root + f):
30 |                     logging.info('Skip')
31 |                     continue
32 | 
33 |                 nbFiles += 1
34 |                 inFile = root + f
35 | 
36 |                 if p_output is not None:
37 |                     outFile = p_output + '/' + f[:-3] + v
38 |                 else:
39 |                     outFile = inFile[:-3] + v
40 | 
41 |                 if path.exists(outFile) and not p_overwrite:
42 |                     logging.info(f + " : Already Done!!!")
43 |                     continue
44 | 
45 |                 input = FORMATS[k](inFile, p_anatFile)
46 |                 output = FORMATS[v].create(outFile, input.hdr, p_anatFile)
47 |                 tractconverter.convert(input, output)
48 |                 logging.info(inFile)
49 | 
50 |         logging.info('{0} skipped (non-track files)'.format(len(allFiles) - nbFiles))
51 |         if not p_isRecursive:
52 |             break
53 | 
54 |     logging.info("Conversion finished!")
55 | 
56 | #####
57 | # Script part
58 | ###
59 | 
60 | #Script description
61 | DESCRIPTION = 'Convert streamlines files while walking down a path. 
({0})'.format(",".join(FORMATS.keys())) 62 | 63 | 64 | def buildArgsParser(): 65 | p = argparse.ArgumentParser(description=DESCRIPTION) 66 | p.add_argument('-i', action='store', dest='input', 67 | metavar='DIR', required=True, 68 | help='path to walk') 69 | p.add_argument('-o', action='store', dest='output', 70 | metavar='DIR', 71 | help='output folder (if omitted, the walking folder is used)') 72 | p.add_argument('-a', action='store', dest='anat', 73 | metavar='FILE', required=False, 74 | help='anatomy file ({0})'.format(EXT_ANAT)) 75 | 76 | #VTK 77 | p.add_argument('-vtk2tck', action='store_true', dest='vtk2tck', 78 | help='convert .vtk to .tck (anatomy needed)') 79 | p.add_argument('-vtk2trk', action='store_true', dest='vtk2trk', 80 | help='convert .vtk to .trk') 81 | p.add_argument('-vtk2fib', action='store_true', dest='vtk2fib', 82 | help='convert .vtk to .fib') 83 | #FIB 84 | p.add_argument('-fib2tck', action='store_true', dest='fib2tck', 85 | help='convert .fib to .tck (anatomy needed)') 86 | p.add_argument('-fib2trk', action='store_true', dest='fib2trk', 87 | help='convert .fib to .trk') 88 | p.add_argument('-fib2vtk', action='store_true', dest='fib2vtk', 89 | help='convert .fib to .vtk') 90 | #TCK 91 | p.add_argument('-tck2fib', action='store_true', dest='tck2fib', 92 | help='convert .tck to .fib (anatomy needed)') 93 | p.add_argument('-tck2trk', action='store_true', dest='tck2trk', 94 | help='convert .tck to .trk (anatomy needed)') 95 | p.add_argument('-tck2vtk', action='store_true', dest='tck2vtk', 96 | help='convert .tck to .vtk (anatomy needed)') 97 | #TRK 98 | p.add_argument('-trk2tck', action='store_true', dest='trk2tck', 99 | help='convert .trk to .tck (anatomy needed)') 100 | p.add_argument('-trk2fib', action='store_true', dest='trk2fib', 101 | help='convert .trk to .fib') 102 | p.add_argument('-trk2vtk', action='store_true', dest='trk2vtk', 103 | help='convert .trk to .vtk') 104 | 105 | p.add_argument('-R', action='store_true', dest='isRecursive', 106 | help='make a recursive walk') 107 | p.add_argument('-f', action='store_true', dest='isForce', 108 | help='force (pass extension check; overwrite output file)') 109 | p.add_argument('-v', action='store_true', dest='isVerbose', 110 | help='produce verbose output') 111 | 112 | return p 113 | 114 | 115 | def main(): 116 | parser = buildArgsParser() 117 | args = parser.parse_args() 118 | 119 | input = args.input 120 | output = args.output 121 | anat = args.anat 122 | vtk2tck = args.vtk2tck 123 | vtk2trk = args.vtk2trk 124 | vtk2fib = args.vtk2fib 125 | fib2tck = args.fib2tck 126 | fib2trk = args.fib2trk 127 | fib2vtk = args.fib2vtk 128 | trk2tck = args.trk2tck 129 | trk2fib = args.trk2fib 130 | trk2vtk = args.trk2vtk 131 | tck2trk = args.tck2trk 132 | tck2fib = args.tck2fib 133 | tck2vtk = args.tck2vtk 134 | isRecursive = args.isRecursive 135 | isForcing = args.isForce 136 | isVerbose = args.isVerbose 137 | 138 | if isVerbose: 139 | logging.basicConfig(level=logging.DEBUG) 140 | 141 | if not os.path.isdir(input): 142 | parser.error('"{0}" must be a folder!'.format(input)) 143 | 144 | if output is not None: 145 | if not os.path.isdir(output): 146 | if isForcing: 147 | logging.info('Creating "{0}".'.format(output)) 148 | os.makedirs(output) 149 | else: 150 | parser.error("Can't find the output folder") 151 | 152 | #TODO: Warn if duplicate conversion (i.e. tck2X, tck2Y) 153 | #TODO: Find better way to add multiple conversions. 
154 | conversions = {} 155 | if vtk2tck: 156 | conversions['vtk'] = 'tck' 157 | if vtk2trk: 158 | conversions['vtk'] = 'trk' 159 | if vtk2fib: 160 | conversions['vtk'] = 'fib' 161 | if fib2tck: 162 | conversions['fib'] = 'tck' 163 | if fib2trk: 164 | conversions['fib'] = 'trk' 165 | if fib2vtk: 166 | conversions['fib'] = 'vtk' 167 | if trk2tck: 168 | conversions['trk'] = 'tck' 169 | if trk2fib: 170 | conversions['trk'] = 'fib' 171 | if trk2vtk: 172 | conversions['trk'] = 'vtk' 173 | if tck2trk: 174 | conversions['tck'] = 'trk' 175 | if tck2fib: 176 | conversions['tck'] = 'fib' 177 | if tck2vtk: 178 | conversions['tck'] = 'vtk' 179 | 180 | if len(conversions) == 0: 181 | parser.error('Nothing to convert! Please specify at least one conversion.') 182 | 183 | if anat is not None: 184 | if not any(map(anat.endswith, EXT_ANAT.split('|'))): 185 | if isForcing: 186 | logging.info('Reading "{0}" as a {1} file.'.format(anat.split("/")[-1], EXT_ANAT)) 187 | else: 188 | parser.error('Anatomy file must be one of {0}!'.format(EXT_ANAT)) 189 | 190 | if not os.path.isfile(anat): 191 | parser.error('"{0}" must be an existing file!'.format(anat)) 192 | 193 | walkAndConvert(input, conversions, output, anat, isRecursive, isForcing) 194 | 195 | if __name__ == "__main__": 196 | main() 197 | -------------------------------------------------------------------------------- /tractconverter/formats/tck.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | 3 | # Documentation available here: 4 | # http://www.brain.org.au/software/mrtrix/appendix/mrtrix.html#tracks 5 | 6 | import os 7 | import copy 8 | import numpy as np 9 | 10 | from numpy import linalg 11 | import nibabel 12 | 13 | from tractconverter.formats.header import Header as H 14 | from tractconverter.formats import header 15 | from numpy.lib.index_tricks import c_, r_ 16 | 17 | 18 | class TCK: 19 | MAGIC_NUMBER = "mrtrix tracks" 20 | COUNT_OFFSET = len(MAGIC_NUMBER)+1 21 | BUFFER_SIZE = 1000000 22 | 23 | FIBER_DELIMITER = np.array([[np.nan, np.nan, np.nan]], 'f4') 98 | if hdr['datatype'].endswith('LE'): 99 | self.dtype = np.dtype(' 0: 110 | nbBytesToRead = min(remainingBytes, TCK.BUFFER_SIZE * 3 * self.dtype.itemsize) 111 | buff = f.read(nbBytesToRead) # Read TCK.BUFFER_SIZE triplets of coordinates (float) 112 | pts = np.frombuffer(buff, dtype=self.dtype) # Convert binary to float 113 | remainingBytes -= nbBytesToRead 114 | 115 | pts = pts.reshape([-1, 3]) 116 | nbNaNs = np.isnan(pts[:, 0]).sum() 117 | self.hdr[H.NB_FIBERS] += nbNaNs 118 | self.hdr[H.NB_POINTS] += len(pts) - nbNaNs 119 | 120 | # Because the file might end with a serie of 'inf' 121 | self.hdr[H.NB_POINTS] -= np.isinf(pts[:, 0]).sum() 122 | f.close() 123 | 124 | @classmethod 125 | def get_empty_header(cls): 126 | hdr = {} 127 | 128 | #Default values 129 | hdr[H.MAGIC_NUMBER] = cls.MAGIC_NUMBER 130 | hdr[H.NB_FIBERS] = 0 131 | 132 | return hdr 133 | 134 | def writeHeader(self): 135 | f = open(self.filename, 'wb') 136 | 137 | lines = [] 138 | lines.append(TCK.MAGIC_NUMBER) 139 | lines.append("count: {0:010}".format(self.hdr[H.NB_FIBERS])) 140 | lines.append("datatype: Float32LE") 141 | lines.append("file: . 
") 142 | out = "\n".join(lines) 143 | f.write(out) 144 | offset = len(out) + 5 # +5 is for "\nEND\n", +1 is for the beginning of binary data 145 | self.offset = offset + len(str(offset)) 146 | 147 | if len(str(self.offset)) != len(str(offset)): 148 | self.offset += 1 149 | 150 | f.write(str(self.offset) + "\n") 151 | f.write("END\n") 152 | f.write(self.EOF_DELIMITER.tostring()) 153 | f.close() 154 | 155 | def close(self): 156 | pass 157 | 158 | def _calcTransform(self, anatFile): 159 | # The MrTrix fibers are defined in the same geometric reference 160 | # as the anatomical file. That is, the fibers coordinates are related to 161 | # the anatomy in world space. The transformation from local to world space 162 | # for the anatomy is encoded in the m_dh->m_niftiTransform member. 163 | # Since we do not consider this tranform when loading the anatomy, we must 164 | # bring back the fibers in the same reference, using the inverse of the 165 | # local to world transformation. A further problem arises when loading an 166 | # anatomy that has voxels with dimensions differing from 1x1x1. The 167 | # scaling factor is encoded in the transformation matrix, but we do not, 168 | # for the moment, use this scaling. Therefore, we must remove it from the 169 | # the transformation matrix before computing its inverse. 170 | if anatFile is None: 171 | self.M = np.identity(4, dtype=' 0 or not np.all(np.isinf(pts)): 221 | if remainingBytes > 0: 222 | nbBytesToRead = min(remainingBytes, TCK.BUFFER_SIZE * 3 * self.dtype.itemsize) 223 | buff += f.read(nbBytesToRead) # Read BUFFER_SIZE triplets of coordinates (float) 224 | remainingBytes -= nbBytesToRead 225 | # 226 | pts = np.frombuffer(buff, dtype=self.dtype) # Convert binary to float 227 | 228 | if self.dtype != ' 0: 244 | yield np.dot(c_[pts[idx_start:idx_end, :], np.ones([nbPts, 1], dtype=' 0] 276 | -------------------------------------------------------------------------------- /tractconverter/formats/trk.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | 3 | # Documentation available here: 4 | # http://www.trackvis.org/docs/?subsect=fileformat 5 | 6 | import io 7 | import os 8 | import copy 9 | import logging 10 | import numpy as np 11 | 12 | from tractconverter.formats import header 13 | from tractconverter.formats.header import Header as H 14 | 15 | 16 | def readBinaryBytes(f, nbBytes, dtype): 17 | buff = f.read(nbBytes * dtype.itemsize) 18 | return np.frombuffer(buff, dtype=dtype) 19 | 20 | 21 | class TRK: 22 | MAGIC_NUMBER = "TRACK" 23 | COUNT_OFFSET = 988 24 | OFFSET = 1000 25 | # self.hdr 26 | # self.filename 27 | # self.hdr[H.ENDIAN] 28 | # self.FIBER_DELIMITER 29 | # self.END_DELIMITER 30 | 31 | ##### 32 | # Static Methods 33 | ### 34 | @staticmethod 35 | def _check(filename): 36 | f = open(filename, 'rb') 37 | magicNumber = f.read(5) 38 | f.close() 39 | return magicNumber == TRK.MAGIC_NUMBER 40 | 41 | @staticmethod 42 | def create(filename, hdr=None, anatFile=None): 43 | f = open(filename, 'wb') 44 | f.write(TRK.MAGIC_NUMBER + "\n") 45 | f.close() 46 | 47 | if hdr is None: 48 | hdr = TRK.get_empty_header() 49 | else: 50 | hdr = copy.deepcopy(hdr) 51 | 52 | hdr[H.NB_FIBERS] = 0 # NB_FIBERS will be updated when using iadd(). 
53 | 54 | trk = TRK(filename, load=False) 55 | trk.hdr = hdr 56 | trk.writeHeader() 57 | 58 | return trk 59 | 60 | ##### 61 | # Methods 62 | ### 63 | def __init__(self, filename, anatFile=None, load=True): 64 | if not TRK._check(filename): 65 | raise NameError("Not a TRK file.") 66 | 67 | self.filename = filename 68 | self.hdr = {} 69 | if load: 70 | self._load() 71 | self.hdr = header.get_header_from_anat(anatFile, self.hdr) 72 | 73 | def _load(self): 74 | f = open(self.filename, 'rb') 75 | 76 | ##### 77 | # Read header 78 | ### 79 | self.hdr[H.MAGIC_NUMBER] = f.read(6) 80 | self.hdr[H.DIMENSIONS] = np.frombuffer(f.read(6), dtype='i4') 115 | self.hdr["version"] = self.hdr["version"].astype('>i4') 116 | self.hdr["hdr_size"] = self.hdr["hdr_size"].astype('>i4') 117 | 118 | nb_fibers = 0 119 | self.hdr[H.NB_POINTS] = 0 120 | 121 | #Either verify the number of streamlines specified in the header is correct or 122 | # count the actual number of streamlines in case it is not specified in the header. 123 | remainingBytes = os.path.getsize(self.filename) - self.OFFSET 124 | while remainingBytes > 0: 125 | # Read points 126 | nbPoints = readBinaryBytes(f, 1, np.dtype(self.hdr[H.ENDIAN] + "i4"))[0] 127 | self.hdr[H.NB_POINTS] += nbPoints 128 | # This seek is used to go to the next points number indication in the file. 129 | f.seek((nbPoints * (3 + self.hdr[H.NB_SCALARS_BY_POINT]) 130 | + self.hdr[H.NB_PROPERTIES_BY_TRACT]) * 4, 1) # Relative seek 131 | remainingBytes -= (nbPoints * (3 + self.hdr[H.NB_SCALARS_BY_POINT]) 132 | + self.hdr[H.NB_PROPERTIES_BY_TRACT]) * 4 + 4 133 | nb_fibers += 1 134 | 135 | if self.hdr[H.NB_FIBERS] != nb_fibers: 136 | logging.warn(('The number of streamlines specified in header ({0}) does not match ' 137 | 'the actual number of streamlines contained in this file ({1}). ' 138 | 'The latter will be used.').format(self.hdr[H.NB_FIBERS], nb_fibers)) 139 | 140 | self.hdr[H.NB_FIBERS] = nb_fibers 141 | 142 | f.close() 143 | 144 | @classmethod 145 | def get_empty_header(cls): 146 | hdr = {} 147 | 148 | #Default values 149 | hdr[H.MAGIC_NUMBER] = cls.MAGIC_NUMBER 150 | hdr[H.VOXEL_SIZES] = (1, 1, 1) 151 | hdr[H.DIMENSIONS] = (1, 1, 1) 152 | hdr[H.VOXEL_TO_WORLD] = np.eye(4) 153 | hdr[H.VOXEL_ORDER] = 'LPS' # Trackvis's default is LPS 154 | hdr[H.NB_FIBERS] = 0 155 | hdr['version'] = 2 156 | hdr['hdr_size'] = cls.OFFSET 157 | 158 | return hdr 159 | 160 | def writeHeader(self): 161 | # Get the voxel size and format it as an array. 162 | voxel_sizes = np.asarray(self.hdr.get(H.VOXEL_SIZES, (1.0, 1.0, 1.0)), dtype=' 0: 226 | # Read points 227 | nbPoints = readBinaryBytes(f, 1, np.dtype(self.hdr[H.ENDIAN] + "i4"))[0] 228 | ptsAndScalars = readBinaryBytes(f, 229 | nbPoints * (3 + self.hdr[H.NB_SCALARS_BY_POINT]), 230 | np.dtype(self.hdr[H.ENDIAN] + "f4")) 231 | 232 | # If there are some properties, ignore them for now. 233 | properties = readBinaryBytes(f, 234 | self.hdr[H.NB_PROPERTIES_BY_TRACT], 235 | np.dtype(self.hdr[H.ENDIAN] + "f4")) 236 | 237 | newShape = [-1, 3 + self.hdr[H.NB_SCALARS_BY_POINT]] 238 | ptsAndScalars = ptsAndScalars.reshape(newShape) 239 | 240 | pointsWithoutScalars = ptsAndScalars[:, 0:3] 241 | yield pointsWithoutScalars 242 | 243 | remainingBytes -= 4 # Number of points 244 | remainingBytes -= nbPoints * (3 + self.hdr[H.NB_SCALARS_BY_POINT]) * 4 245 | # For now, we do not process the tract properties, so just skip over them. 
246 | remainingBytes -= self.hdr[H.NB_PROPERTIES_BY_TRACT] * 4 247 | cpt += 1 248 | 249 | f.close() 250 | 251 | def load_all(self): 252 | if self.hdr[H.NB_FIBERS] == 0: 253 | return [] 254 | 255 | with open(self.filename, 'rb') as f: 256 | f.seek(self.OFFSET) 257 | buff = io.BytesIO(f.read()) 258 | 259 | remainingBytes = os.path.getsize(self.filename) - self.OFFSET 260 | 261 | streamlines = [] 262 | cpt = 0 263 | while cpt < self.hdr[H.NB_FIBERS] or remainingBytes > 0: 264 | # Read points 265 | nbPoints = readBinaryBytes(buff, 1, np.dtype(self.hdr[H.ENDIAN] + "i4"))[0] 266 | ptsAndScalars = readBinaryBytes(buff, 267 | nbPoints * (3 + self.hdr[H.NB_SCALARS_BY_POINT]), 268 | np.dtype(self.hdr[H.ENDIAN] + "f4")) 269 | 270 | # If there are some properties, ignore them for now. 271 | properties = readBinaryBytes(buff, 272 | self.hdr[H.NB_PROPERTIES_BY_TRACT], 273 | np.dtype(self.hdr[H.ENDIAN] + "f4")) 274 | 275 | newShape = [-1, 3 + self.hdr[H.NB_SCALARS_BY_POINT]] 276 | ptsAndScalars = ptsAndScalars.reshape(newShape) 277 | 278 | pointsWithoutScalars = ptsAndScalars[:, 0:3] 279 | streamlines.append(pointsWithoutScalars) 280 | 281 | remainingBytes -= 4 # Number of points 282 | remainingBytes -= nbPoints * (3 + self.hdr[H.NB_SCALARS_BY_POINT]) * 4 283 | # For now, we do not process the tract properties, so just skip over them. 284 | remainingBytes -= self.hdr[H.NB_PROPERTIES_BY_TRACT] * 4 285 | cpt += 1 286 | 287 | return streamlines 288 | 289 | def __str__(self): 290 | text = "" 291 | text += "MAGIC NUMBER: {0}".format(self.hdr[H.MAGIC_NUMBER]) 292 | text += "\nv.{0}".format(self.hdr['version']) 293 | text += "\ndim: {0}".format(self.hdr[H.DIMENSIONS]) 294 | text += "\nvoxel_sizes: {0}".format(self.hdr[H.VOXEL_SIZES]) 295 | text += "\norigin: {0}".format(self.hdr[H.ORIGIN]) 296 | text += "\nnb_scalars: {0}".format(self.hdr[H.NB_SCALARS_BY_POINT]) 297 | text += "\nscalar_name:\n{0}".format("\n".join(self.hdr['scalar_name'])) 298 | text += "\nnb_properties: {0}".format(self.hdr[H.NB_PROPERTIES_BY_TRACT]) 299 | text += "\nproperty_name:\n{0}".format("\n".join(self.hdr['property_name'])) 300 | text += "\nvox_to_world:\n{0}".format(self.hdr[H.VOXEL_TO_WORLD]) 301 | text += "\nworld_order: {0}".format(self.hdr[H.WORLD_ORDER]) 302 | text += "\nvoxel_order: {0}".format(self.hdr[H.VOXEL_ORDER]) 303 | text += "\nimage_orientation_patient: {0}".format(self.hdr['image_orientation_patient']) 304 | text += "\npad1: {0}".format(self.hdr['pad1']) 305 | text += "\npad2: {0}".format(self.hdr['pad2']) 306 | text += "\ninvert_x: {0}".format(self.hdr['invert_x']) 307 | text += "\ninvert_y: {0}".format(self.hdr['invert_y']) 308 | text += "\ninvert_z: {0}".format(self.hdr['invert_z']) 309 | text += "\nswap_xy: {0}".format(self.hdr['swap_xy']) 310 | text += "\nswap_yz: {0}".format(self.hdr['swap_yz']) 311 | text += "\nswap_zx: {0}".format(self.hdr['swap_zx']) 312 | text += "\nn_count: {0}".format(self.hdr[H.NB_FIBERS]) 313 | text += "\nhdr_size: {0}".format(self.hdr['hdr_size']) 314 | text += "\nendianess: {0}".format(self.hdr[H.ENDIAN]) 315 | 316 | return text 317 | -------------------------------------------------------------------------------- /tractconverter/formats/vtk.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | 3 | # Documentation available here: 4 | # http://www.vtk.org/VTK/img/file-formats.pdf 5 | 6 | import os 7 | import copy 8 | import tempfile 9 | import numpy as np 10 | 11 | from tractconverter.formats import header 12 | from 
tractconverter.formats.header import Header as H 13 | 14 | 15 | def readBinaryBytes(f, nbBytes, dtype): 16 | buff = f.read(nbBytes * dtype.itemsize) 17 | return np.frombuffer(buff, dtype=dtype) 18 | 19 | 20 | def readAsciiBytes(f, nbWords, dtype): 21 | words = [] 22 | buff = "" 23 | while len(words) < nbWords: 24 | c = f.read(1) 25 | if c == " " or c == '\n': 26 | if len(buff) > 0: 27 | words.append(buff) 28 | buff = "" 29 | else: 30 | buff += c 31 | 32 | return np.array(' '.join(words).split(), dtype=dtype) 33 | 34 | 35 | # We assume the file cursor points to the beginning of the file. 36 | def checkIfBinary(f): 37 | f.readline() # Skip version 38 | f.readline() # Skip description 39 | file_type = f.readline().strip() # Type of the file BINARY or ASCII. 40 | 41 | f.seek(0, 0) # Reset cursor to beginning of the file. 42 | 43 | return file_type.upper() == "BINARY" 44 | 45 | 46 | def convertAsciiToBinary(original_filename): 47 | sections = get_sections(original_filename) 48 | 49 | f = open(original_filename, 'rb') 50 | 51 | # Skip the first header lines 52 | f.readline() # Version (not used) 53 | f.readline() # Description (not used) 54 | original_file_type = f.readline().strip() # Type of the file BINARY or ASCII. 55 | f.readline() # Data type (not used) 56 | 57 | if original_file_type.upper() != "ASCII": 58 | raise ValueError("BINARY file given to convertAsciiToBinary.") 59 | 60 | # Create a temporary file with a name. Delete is set to false to make sure 61 | # the file is not automatically deleted when closed. 62 | binary_file = tempfile.NamedTemporaryFile(delete=False) 63 | 64 | # Write header 65 | binary_file.write("# {0} DataFile Version {1}\n".format(VTK.MAGIC_NUMBER, VTK.VERSION)) 66 | binary_file.write("converted from ASCII vtk by tractconverter\n") 67 | binary_file.write("BINARY\n") 68 | binary_file.write("DATASET POLYDATA\n") 69 | 70 | # Convert POINTS section from ASCII to binary 71 | f.seek(sections['POINTS'], os.SEEK_SET) 72 | line = f.readline() # POINTS n float 73 | nb_coordinates = int(line.split()[1]) * 3 74 | binary_file.write(line) 75 | 76 | while nb_coordinates * 3 > 0: 77 | tokens = f.readline().split() 78 | 79 | #Skip empty lines 80 | if len(tokens) == 0: 81 | continue 82 | 83 | binary_file.write(np.array(tokens, dtype='>f4').tostring()) 84 | nb_coordinates -= len(tokens) 85 | 86 | binary_file.write('\n') 87 | 88 | if 'LINES' in sections: 89 | # Convert LINES section from ASCII to binary 90 | f.seek(sections['LINES'], os.SEEK_SET) 91 | line = f.readline() # LINES n size 92 | nb_lines = int(line.split()[1]) 93 | binary_file.write(line) 94 | 95 | while nb_lines > 0: 96 | tokens = f.readline().split() 97 | 98 | #Skip empty lines 99 | if len(tokens) == 0: 100 | continue 101 | 102 | #Write number of points in the line 103 | binary_file.write(np.array([tokens[0]], dtype='>i4').tostring()) 104 | #Write indices of points in the line 105 | binary_file.write(np.array(tokens[1:], dtype='>i4').tostring()) 106 | nb_lines -= 1 107 | 108 | # TODO: COLORS, SCALARS 109 | 110 | binary_file.close() 111 | f.close() 112 | 113 | return binary_file.name 114 | 115 | POLYDATA_SECTIONS = ['POINTS', 'VERTICES', 'LINES', 'POLYGONS', 'TRIANGLE_STRIPS'] 116 | 117 | 118 | def get_sections(filename): 119 | sections_found = {} 120 | nb_read_bytes = 0 121 | with open(filename, 'rb') as f: 122 | for line in f: 123 | for section in POLYDATA_SECTIONS: 124 | if line.upper().startswith(section): 125 | if section in sections_found: 126 | print "Warning multiple {0} sections!".format(section) 127 | 128 | 
sections_found[section] = nb_read_bytes 129 | 130 | nb_read_bytes += len(line) 131 | 132 | return sections_found 133 | 134 | 135 | class VTK: 136 | MAGIC_NUMBER = "vtk" 137 | VERSION = "3.0" 138 | BUFFER = 10000 139 | 140 | # self.hdr 141 | # self.filename 142 | # self.endian 143 | # self.offset 144 | # self.FIBER_DELIMITER 145 | # self.END_DELIMITER 146 | 147 | ##### 148 | # Static Methods 149 | ### 150 | @staticmethod 151 | def _check(filename): 152 | f = open(filename, 'rb') 153 | magicNumber = f.readline().strip() 154 | f.close() 155 | return VTK.MAGIC_NUMBER in magicNumber 156 | 157 | @staticmethod 158 | def create(filename, hdr=None, anatFile=None): 159 | f = open(filename, 'wb') 160 | f.write(VTK.MAGIC_NUMBER + "\n") 161 | f.close() 162 | 163 | if hdr is None: 164 | hdr = VTK.get_empty_header() 165 | else: 166 | hdr = copy.deepcopy(hdr) 167 | 168 | vtk = VTK(filename, load=False) 169 | vtk.hdr = hdr 170 | vtk.writeHeader() 171 | 172 | return vtk 173 | 174 | ##### 175 | # Methods 176 | ### 177 | def __init__(self, filename, anatFile=None, load=True): 178 | if not VTK._check(filename): 179 | raise NameError("Not a VTK file.") 180 | 181 | self.filename = filename 182 | self.original_filename = filename 183 | 184 | self.hdr = {} 185 | if load: 186 | self.hdr = header.get_header_from_anat(anatFile) 187 | self._load() 188 | 189 | def __del__(self): 190 | self.cleanTempFile() 191 | 192 | def _load(self): 193 | f = open(self.filename, 'rb') 194 | 195 | ##### 196 | # Read header 197 | ### 198 | info = f.readline().split() 199 | self.hdr[H.MAGIC_NUMBER] = info[1] 200 | self.hdr["version"] = info[-1] 201 | self.hdr["description"] = f.readline().strip() 202 | self.hdr["file_type"] = f.readline().strip() 203 | 204 | ##### 205 | # If in ASCII format, create a temporary Binary file. This 206 | # will avoid lots of problems when reading. 207 | # We will always read a binary file, converted or not. 208 | ##### 209 | if "BINARY" != self.hdr["file_type"].upper(): 210 | f.close() 211 | binary_filename = convertAsciiToBinary(self.filename) 212 | self.filename = binary_filename 213 | 214 | self.sections = get_sections(self.filename) 215 | 216 | #TODO: Check number of scalars and properties 217 | self.hdr[H.NB_SCALARS_BY_POINT] = "N/A" 218 | self.hdr[H.NB_PROPERTIES_BY_TRACT] = "N/A" 219 | 220 | f = open(self.filename, 'rb') 221 | 222 | ##### 223 | # Read header 224 | ### 225 | f.readline() # Version (not used) 226 | f.readline() # Description (not used) 227 | self.fileType = f.readline().strip() # Type of the file BINARY or ASCII. 228 | f.readline() # Data type (not used) 229 | 230 | #self.offset = f.tell() # Store offset to the beginning of data. 231 | 232 | f.seek(self.sections['POINTS'], os.SEEK_SET) 233 | self.hdr[H.NB_POINTS] = int(f.readline().split()[1]) # POINTS n float 234 | #self.offset_points = f.tell() 235 | 236 | #f.seek(self.hdr[H.NB_POINTS] * 3 * 4, 1) # Skip nb_points * 3 (x,y,z) * 4 bytes 237 | # Skip newline, to bring to the line containing the LINES marker. 238 | #f.readline() 239 | 240 | self.hdr[H.NB_FIBERS] = 0 241 | if 'LINES' in self.sections: 242 | f.seek(self.sections['LINES'], os.SEEK_SET) 243 | infos = f.readline().split() # LINES n size 244 | self.hdr[H.NB_FIBERS] = int(infos[1]) 245 | #size = int(infos[2]) 246 | 247 | #self.offset_lines = f.tell() 248 | #f.seek(size * 4, 1) # Skip nb_lines + nb_points * 4 bytes 249 | 250 | # TODO: Read infos about COLORS, SCALARS, ... 
254 |     @classmethod
255 |     def get_empty_header(cls):
256 |         hdr = {}
257 | 
258 |         # Default values
259 |         hdr[H.MAGIC_NUMBER] = cls.MAGIC_NUMBER
260 |         hdr[H.NB_FIBERS] = 0
261 |         hdr[H.NB_POINTS] = 0
262 |         hdr[H.NB_SCALARS_BY_POINT] = 0
263 |         hdr[H.NB_PROPERTIES_BY_TRACT] = 0
264 | 
265 |         return hdr
266 | 
267 |     def writeHeader(self):
268 |         self.sections = {}
269 |         f = open(self.filename, 'wb')
270 |         f.write("# {0} DataFile Version {1}\n".format(VTK.MAGIC_NUMBER, VTK.VERSION))
271 |         f.write("vtk comments\n")
272 |         f.write("BINARY\n")  # Only binary files are supported for saving.
273 |         f.write("DATASET POLYDATA\n")
274 | 
275 |         # POINTS
276 |         self.sections['POINTS'] = f.tell()
277 |         f.write("POINTS {0} float\n".format(self.hdr[H.NB_POINTS]))
278 |         self.sections['POINTS_start'] = f.tell()
279 |         self.sections['POINTS_current'] = f.tell()
280 |         #self.offset = f.tell()
281 |         f.write(np.zeros((self.hdr[H.NB_POINTS], 3), dtype='>f4'))
282 | 
283 |         f.write('\n')
284 | 
285 |         # LINES
286 |         if self.hdr[H.NB_FIBERS] > 0:
287 |             self.sections['LINES'] = f.tell()
288 |             size = self.hdr[H.NB_FIBERS] + self.hdr[H.NB_POINTS]
289 |             f.write("LINES {0} {1}\n".format(self.hdr[H.NB_FIBERS], size))
290 |             self.sections['LINES_current'] = f.tell()
291 |             f.write(np.zeros(size, dtype='>i4'))
292 | 
293 |         # TODO: COLORS, SCALARS
294 | 
295 |         f.close()
296 | 
297 |     def cleanTempFile(self):
298 |         # If the filenames differ, we converted an ASCII file to a binary file.
299 |         # In this case, if the temporary binary file still exists, we need to clean up after ourselves.
300 |         if self.filename != self.original_filename and os.path.exists(self.filename):
301 |             os.remove(self.filename)
302 |             self.filename = self.original_filename
303 | 
304 |     def close(self):
305 |         self.cleanTempFile()
306 | 
307 | 
308 |     # TODO: make it really dynamic if possible (like trk and tck).
309 |     def __iadd__(self, fibers):
310 |         if len(fibers) == 0:
311 |             return self
312 | 
313 |         f = open(self.filename, 'r+b')
314 |         f.seek(self.sections['POINTS_current'], os.SEEK_SET)
315 | 
316 |         nb_points = (self.sections['POINTS_current'] - self.sections['POINTS_start']) // 3 // 4  # Points already on disk: bytes written / (3 coords * 4 bytes).
317 |         for fib in fibers:
318 |             f.write(fib.astype('>f4').tostring())
319 | 
320 |         self.sections['POINTS_current'] = f.tell()
321 | 
322 |         f.seek(self.sections['LINES_current'], os.SEEK_SET)
323 |         for fib in fibers:
324 |             f.write(np.array([len(fib)], dtype='>i4').tostring())
325 |             f.write(np.arange(nb_points, nb_points + len(fib), dtype='>i4').tostring())
326 |             nb_points += len(fib)
327 | 
328 |         self.sections['LINES_current'] = f.tell()
329 | 
330 |         f.close()
331 | 
332 |         return self
333 | 
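    # On-disk layout written by __iadd__ above (all big-endian, as the legacy
    # VTK format prescribes): the POINTS section is a flat run of float32
    # (x, y, z) triplets, and each LINES record is one int32 point count n
    # followed by that streamline's n int32 point indices.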
334 |     #####
335 |     # Iterate through fibers
336 |     # TODO: Use a buffer instead of reading one streamline at a time.
337 |     ###
338 |     def __iter__(self):
339 |         if self.hdr[H.NB_FIBERS] == 0:
340 |             return
341 | 
342 |         f = open(self.filename, 'rb')
343 | 
344 |         # Keep important positions in the file.
345 |         f.seek(self.sections['POINTS'], os.SEEK_SET)
346 |         f.readline()
347 |         self.sections['POINTS_current'] = f.tell()
348 | 
349 |         f.seek(self.sections['LINES'], os.SEEK_SET)
350 |         f.readline()
351 |         self.sections['LINES_current'] = f.tell()
352 | 
353 |         for i in range(0, self.hdr[H.NB_FIBERS], self.BUFFER):
354 |             f.seek(self.sections['LINES_current'], os.SEEK_SET)  # Seek from beginning of the file.
355 | 
356 |             # Read indices of the next batch of streamlines.
357 |             nbIdx = []
358 |             ptsIdx = []
359 |             for k in range(min(self.hdr[H.NB_FIBERS], i + self.BUFFER) - i):
360 |                 nbIdx.append(readBinaryBytes(f, 1, np.dtype('>i4'))[0])
361 |                 ptsIdx.append(readBinaryBytes(f, nbIdx[-1], np.dtype('>i4')))
362 | 
363 |             self.sections['LINES_current'] = f.tell()
364 | 
365 |             # Read points according to the indices previously read.
366 |             startPos = np.min(ptsIdx[0]) * 3  # Minimum index * 3 (x,y,z)
367 |             endPos = (np.max(ptsIdx[-1]) + 1) * 3  # After maximum index * 3 (x,y,z)
368 |             f.seek(self.sections['POINTS_current'] + startPos * 4, os.SEEK_SET)  # Seek from beginning of the file.
369 | 
370 |             points = readBinaryBytes(f, endPos - startPos, np.dtype('>f4'))
371 |             points = points.reshape([-1, 3])  # Matrix dimensions: Nx3
372 | 
373 |             # TODO: Read COLORS, SCALARS, ...
374 |             for pts_id in ptsIdx:
375 |                 yield points[pts_id - startPos // 3]  # Re-index into this batch's points.
376 | 
377 |         f.close()
378 | 
379 |     def load_all(self):
380 |         # TODO: make it more efficient; load everything in memory first
381 |         # and do the processing afterward.
382 |         return [s for s in self]
383 | 
384 |     def __str__(self):
385 |         text = ""
386 |         text += "MAGIC NUMBER: {0}".format(self.hdr[H.MAGIC_NUMBER])
387 |         text += "\nv.{0}".format(self.hdr['version'])
388 |         text += "\nDescription: '{0}'".format(self.hdr['description'])
389 |         text += "\nFile type: {0}".format(self.hdr['file_type'])
390 |         text += "\nnb_scalars: {0}".format(self.hdr[H.NB_SCALARS_BY_POINT])
391 |         text += "\nnb_properties: {0}".format(self.hdr[H.NB_PROPERTIES_BY_TRACT])
392 |         text += "\nn_count: {0}".format(self.hdr[H.NB_FIBERS])
393 | 
394 |         return text
395 | 
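Taken together, the class supports a simple write-then-read round trip. The
following is a minimal sketch, not part of the module; the output path
'example.vtk' is hypothetical. It preallocates a binary VTK file for two short
streamlines, fills it in place through __iadd__, then streams the points back:

import numpy as np
from tractconverter.formats.header import Header as H
from tractconverter.formats.vtk import VTK

streamlines = [np.array([[0, 0, 0], [1, 1, 1]], dtype=np.float32),
               np.array([[2, 2, 2], [3, 3, 3], [4, 4, 4]], dtype=np.float32)]

hdr = VTK.get_empty_header()
hdr[H.NB_FIBERS] = len(streamlines)
hdr[H.NB_POINTS] = sum(len(s) for s in streamlines)

vtk = VTK.create('example.vtk', hdr)  # Writes a zero-filled POINTS/LINES skeleton.
vtk += streamlines                    # Overwrites the zeros in place.

for pts in VTK('example.vtk'):        # Yields one Nx3 float array per streamline.
    print pts.shape                   # (2, 3) then (3, 3)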
--------------------------------------------------------------------------------
/distribute_setup.py:
--------------------------------------------------------------------------------
1 | #!python
2 | """Bootstrap distribute installation
3 | 
4 | If you want to use setuptools in your package's setup.py, just include this
5 | file in the same directory with it, and add this to the top of your setup.py::
6 | 
7 |     from distribute_setup import use_setuptools
8 |     use_setuptools()
9 | 
10 | If you want to require a specific version of setuptools, set a download
11 | mirror, or use an alternate download directory, you can do so by supplying
12 | the appropriate options to ``use_setuptools()``.
13 | 
14 | This file can also be run as a script to install or upgrade setuptools.
15 | """
16 | import os
17 | import sys
18 | import time
19 | import fnmatch
20 | import tempfile
21 | import tarfile
22 | from distutils import log
23 | 
24 | try:
25 |     from site import USER_SITE
26 | except ImportError:
27 |     USER_SITE = None
28 | 
29 | try:
30 |     import subprocess
31 | 
32 |     def _python_cmd(*args):
33 |         args = (sys.executable,) + args
34 |         return subprocess.call(args) == 0
35 | 
36 | except ImportError:
37 |     # will be used for python 2.3
38 |     def _python_cmd(*args):
39 |         args = (sys.executable,) + args
40 |         # quoting arguments if windows
41 |         if sys.platform == 'win32':
42 |             def quote(arg):
43 |                 if ' ' in arg:
44 |                     return '"%s"' % arg
45 |                 return arg
46 |             args = [quote(arg) for arg in args]
47 |         return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
48 | 
49 | DEFAULT_VERSION = "0.6.14"
50 | DEFAULT_URL = "https://pypi.python.org/packages/source/d/distribute/"
51 | SETUPTOOLS_FAKED_VERSION = "0.6c11"
52 | 
53 | SETUPTOOLS_PKG_INFO = """\
54 | Metadata-Version: 1.0
55 | Name: setuptools
56 | Version: %s
57 | Summary: xxxx
58 | Home-page: xxx
59 | Author: xxx
60 | Author-email: xxx
61 | License: xxx
62 | Description: xxx
63 | """ % SETUPTOOLS_FAKED_VERSION
64 | 
65 | 
66 | def _install(tarball):
67 |     # extracting the tarball
68 |     tmpdir = tempfile.mkdtemp()
69 |     log.warn('Extracting in %s', tmpdir)
70 |     old_wd = os.getcwd()
71 |     try:
72 |         os.chdir(tmpdir)
73 |         tar = tarfile.open(tarball)
74 |         _extractall(tar)
75 |         tar.close()
76 | 
77 |         # going in the directory
78 |         subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
79 |         os.chdir(subdir)
80 |         log.warn('Now working in %s', subdir)
81 | 
82 |         # installing
83 |         log.warn('Installing Distribute')
84 |         if not _python_cmd('setup.py', 'install'):
85 |             log.warn('Something went wrong during the installation.')
86 |             log.warn('See the error message above.')
87 |     finally:
88 |         os.chdir(old_wd)
89 | 
90 | 
91 | def _build_egg(egg, tarball, to_dir):
92 |     # extracting the tarball
93 |     tmpdir = tempfile.mkdtemp()
94 |     log.warn('Extracting in %s', tmpdir)
95 |     old_wd = os.getcwd()
96 |     try:
97 |         os.chdir(tmpdir)
98 |         tar = tarfile.open(tarball)
99 |         _extractall(tar)
100 |         tar.close()
101 | 
102 |         # going in the directory
103 |         subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
104 |         os.chdir(subdir)
105 |         log.warn('Now working in %s', subdir)
106 | 
107 |         # building an egg
108 |         log.warn('Building a Distribute egg in %s', to_dir)
109 |         _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
110 | 
111 |     finally:
112 |         os.chdir(old_wd)
113 |     # returning the result
114 |     log.warn(egg)
115 |     if not os.path.exists(egg):
116 |         raise IOError('Could not build the egg.')
117 | 
118 | 
119 | def _do_download(version, download_base, to_dir, download_delay):
120 |     egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
121 |                        % (version, sys.version_info[0], sys.version_info[1]))
122 |     if not os.path.exists(egg):
123 |         tarball = download_setuptools(version, download_base,
124 |                                       to_dir, download_delay)
125 |         _build_egg(egg, tarball, to_dir)
126 |     sys.path.insert(0, egg)
127 |     import setuptools
128 |     setuptools.bootstrap_install_from = egg
129 | 
130 | 
131 | def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
132 |                    to_dir=os.curdir, download_delay=15, no_fake=True):
133 |     # making sure we use the absolute path
134 |     to_dir = os.path.abspath(to_dir)
135 |     was_imported = 'pkg_resources' in sys.modules or \
136 |         'setuptools' in sys.modules
137 |     try:
138 |         try:
139 |             import pkg_resources
140 |             if not hasattr(pkg_resources, '_distribute'):
141 |                 if not no_fake:
142 |                     _fake_setuptools()
143 |                 raise ImportError
144 |         except ImportError:
145 |             return _do_download(version, download_base, to_dir, download_delay)
146 |         try:
147 |             pkg_resources.require("distribute>="+version)
148 |             return
149 |         except pkg_resources.VersionConflict:
150 |             e = sys.exc_info()[1]
151 |             if was_imported:
152 |                 sys.stderr.write(
153 |                     "The required version of distribute (>=%s) is not available,\n"
154 |                     "and can't be installed while this script is running. Please\n"
155 |                     "install a more recent version first, using\n"
156 |                     "'easy_install -U distribute'."
157 |                     "\n\n(Currently using %r)\n" % (version, e.args[0]))
158 |                 sys.exit(2)
159 |             else:
160 |                 del pkg_resources, sys.modules['pkg_resources']  # reload ok
161 |                 return _do_download(version, download_base, to_dir,
162 |                                     download_delay)
163 |         except pkg_resources.DistributionNotFound:
164 |             return _do_download(version, download_base, to_dir,
165 |                                 download_delay)
166 |     finally:
167 |         if not no_fake:
168 |             _create_fake_setuptools_pkg_info(to_dir)
169 | 
170 | def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
171 |                         to_dir=os.curdir, delay=15):
172 |     """Download distribute from a specified location and return its filename
173 | 
174 |     `version` should be a valid distribute version number that is available
175 |     as an egg for download under the `download_base` URL (which should end
176 |     with a '/'). `to_dir` is the directory where the egg will be downloaded.
177 |     `delay` is the number of seconds to pause before an actual download
178 |     attempt.
179 |     """
180 |     # making sure we use the absolute path
181 |     to_dir = os.path.abspath(to_dir)
182 |     try:
183 |         from urllib.request import urlopen
184 |     except ImportError:
185 |         from urllib2 import urlopen
186 |     tgz_name = "distribute-%s.tar.gz" % version
187 |     url = download_base + tgz_name
188 |     saveto = os.path.join(to_dir, tgz_name)
189 |     src = dst = None
190 |     if not os.path.exists(saveto):  # Avoid repeated downloads
191 |         try:
192 |             log.warn("Downloading %s", url)
193 |             src = urlopen(url)
194 |             # Read/write all in one block, so we don't create a corrupt file
195 |             # if the download is interrupted.
196 |             data = src.read()
197 |             dst = open(saveto, "wb")
198 |             dst.write(data)
199 |         finally:
200 |             if src:
201 |                 src.close()
202 |             if dst:
203 |                 dst.close()
204 |     return os.path.realpath(saveto)
205 | 
206 | def _no_sandbox(function):
207 |     def __no_sandbox(*args, **kw):
208 |         try:
209 |             from setuptools.sandbox import DirectorySandbox
210 |             if not hasattr(DirectorySandbox, '_old'):
211 |                 def violation(*args):
212 |                     pass
213 |                 DirectorySandbox._old = DirectorySandbox._violation
214 |                 DirectorySandbox._violation = violation
215 |                 patched = True
216 |             else:
217 |                 patched = False
218 |         except ImportError:
219 |             patched = False
220 | 
221 |         try:
222 |             return function(*args, **kw)
223 |         finally:
224 |             if patched:
225 |                 DirectorySandbox._violation = DirectorySandbox._old
226 |                 del DirectorySandbox._old
227 | 
228 |     return __no_sandbox
229 | 
230 | def _patch_file(path, content):
231 |     """Will backup the file then patch it"""
232 |     existing_content = open(path).read()
233 |     if existing_content == content:
234 |         # already patched
235 |         log.warn('Already patched.')
236 |         return False
237 |     log.warn('Patching...')
238 |     _rename_path(path)
239 |     f = open(path, 'w')
240 |     try:
241 |         f.write(content)
242 |     finally:
243 |         f.close()
244 |     return True
245 | 
246 | _patch_file = _no_sandbox(_patch_file)
247 | 
248 | def _same_content(path, content):
249 |     return open(path).read() == content
250 | 
251 | def _rename_path(path):
252 |     new_name = path + '.OLD.%s' % time.time()
253 |     log.warn('Renaming %s into %s', path, new_name)
254 |     os.rename(path, new_name)
255 |     return new_name
256 | 
257 | def _remove_flat_installation(placeholder):
258 |     if not os.path.isdir(placeholder):
259 |         log.warn('Unknown installation at %s', placeholder)
260 |         return False
261 |     found = False
262 |     for file in os.listdir(placeholder):
263 |         if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
264 |             found = True
265 |             break
266 |     if not found:
267 |         log.warn('Could not locate setuptools*.egg-info')
268 |         return
269 | 
270 |     log.warn('Removing elements out of the way...')
271 |     pkg_info = os.path.join(placeholder, file)
272 |     if os.path.isdir(pkg_info):
273 |         patched = _patch_egg_dir(pkg_info)
274 |     else:
275 |         patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
276 | 
277 |     if not patched:
278 |         log.warn('%s already patched.', pkg_info)
279 |         return False
280 |     # now let's move the files out of the way
281 |     for element in ('setuptools', 'pkg_resources.py', 'site.py'):
282 |         element = os.path.join(placeholder, element)
283 |         if os.path.exists(element):
284 |             _rename_path(element)
285 |         else:
286 |             log.warn('Could not find the %s element of the '
287 |                      'Setuptools distribution', element)
288 |     return True
289 | 
290 | _remove_flat_installation = _no_sandbox(_remove_flat_installation)
291 | 
292 | def _after_install(dist):
293 |     log.warn('After install bootstrap.')
294 |     placeholder = dist.get_command_obj('install').install_purelib
295 |     _create_fake_setuptools_pkg_info(placeholder)
296 | 
297 | def _create_fake_setuptools_pkg_info(placeholder):
298 |     if not placeholder or not os.path.exists(placeholder):
299 |         log.warn('Could not find the install location')
300 |         return
301 |     pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
302 |     setuptools_file = 'setuptools-%s-py%s.egg-info' % \
303 |         (SETUPTOOLS_FAKED_VERSION, pyver)
304 |     pkg_info = os.path.join(placeholder, setuptools_file)
305 |     if os.path.exists(pkg_info):
306 |         log.warn('%s already exists', pkg_info)
307 |         return
308 | 
309 |     log.warn('Creating %s', pkg_info)
310 |     f = open(pkg_info, 'w')
311 |     try:
312 |         f.write(SETUPTOOLS_PKG_INFO)
313 |     finally:
314 |         f.close()
315 | 
316 |     pth_file = os.path.join(placeholder, 'setuptools.pth')
317 |     log.warn('Creating %s', pth_file)
318 |     f = open(pth_file, 'w')
319 |     try:
320 |         f.write(os.path.join(os.curdir, setuptools_file))
321 |     finally:
322 |         f.close()
323 | 
324 | _create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
325 | 
326 | def _patch_egg_dir(path):
327 |     # let's check if it's already patched
328 |     pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
329 |     if os.path.exists(pkg_info):
330 |         if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
331 |             log.warn('%s already patched.', pkg_info)
332 |             return False
333 |     _rename_path(path)
334 |     os.mkdir(path)
335 |     os.mkdir(os.path.join(path, 'EGG-INFO'))
336 |     pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
337 |     f = open(pkg_info, 'w')
338 |     try:
339 |         f.write(SETUPTOOLS_PKG_INFO)
340 |     finally:
341 |         f.close()
342 |     return True
343 | 
344 | _patch_egg_dir = _no_sandbox(_patch_egg_dir)
345 | 
346 | def _before_install():
347 |     log.warn('Before install bootstrap.')
348 |     _fake_setuptools()
349 | 
350 | 
351 | def _under_prefix(location):
352 |     if 'install' not in sys.argv:
353 |         return True
354 |     args = sys.argv[sys.argv.index('install')+1:]
355 |     for index, arg in enumerate(args):
356 |         for option in ('--root', '--prefix'):
357 |             if arg.startswith('%s=' % option):
358 |                 top_dir = arg.split('%s=' % option)[-1]  # Strip the '--root='/'--prefix=' prefix.
359 |                 return location.startswith(top_dir)
360 |             elif arg == option:
361 |                 if len(args) > index:
362 |                     top_dir = args[index+1]
363 |                     return location.startswith(top_dir)
364 |         if arg == '--user' and USER_SITE is not None:
365 |             return location.startswith(USER_SITE)
366 |     return True
367 | 
368 | 
369 | def _fake_setuptools():
370 |     log.warn('Scanning installed packages')
371 |     try:
372 |         import pkg_resources
373 |     except ImportError:
374 |         # we're cool
375 |         log.warn('Setuptools or Distribute does not seem to be installed.')
376 |         return
377 |     ws = pkg_resources.working_set
378 |     try:
379 |         setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
380 |                                                                   replacement=False))
381 |     except TypeError:
382 |         # old distribute API
383 |         setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
384 | 
385 |     if setuptools_dist is None:
386 |         log.warn('No setuptools distribution found')
387 |         return
388 |     # detecting if it was already faked
389 |     setuptools_location = setuptools_dist.location
390 |     log.warn('Setuptools installation detected at %s', setuptools_location)
391 | 
392 |     # if --root or --prefix was provided, and if
393 |     # setuptools is not located in them, we don't patch it
394 |     if not _under_prefix(setuptools_location):
395 |         log.warn('Not patching, --root or --prefix is installing Distribute'
396 |                  ' in another location')
397 |         return
398 | 
399 |     # let's see if it's an egg
400 |     if not setuptools_location.endswith('.egg'):
401 |         log.warn('Non-egg installation')
402 |         res = _remove_flat_installation(setuptools_location)
403 |         if not res:
404 |             return
405 |     else:
406 |         log.warn('Egg installation')
407 |         pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
408 |         if (os.path.exists(pkg_info) and
409 |                 _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
410 |             log.warn('Already patched.')
411 |             return
412 |         log.warn('Patching...')
413 |         # let's create a fake egg replacing setuptools one
414 |         res = _patch_egg_dir(setuptools_location)
415 |         if not res:
416 |             return
417 |     log.warn('Patching done.')
418 |     _relaunch()
419 | 
420 | 
421 | def _relaunch():
422 |     log.warn('Relaunching...')
423 |     # we have to relaunch the process
424 |     # pip marker to avoid a relaunch bug
425 |     if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
426 |         sys.argv[0] = 'setup.py'
427 |     args = [sys.executable] + sys.argv
428 |     sys.exit(subprocess.call(args))
429 | 
430 | 
431 | def _extractall(self, path=".", members=None):
432 |     """Extract all members from the archive to the current working
433 |     directory and set owner, modification time and permissions on
434 |     directories afterwards. `path' specifies a different directory
435 |     to extract to. `members' is optional and must be a subset of the
436 |     list returned by getmembers().
437 |     """
438 |     import copy
439 |     import operator
440 |     from tarfile import ExtractError
441 |     directories = []
442 | 
443 |     if members is None:
444 |         members = self
445 | 
446 |     for tarinfo in members:
447 |         if tarinfo.isdir():
448 |             # Extract directories with a safe mode.
449 |             directories.append(tarinfo)
450 |             tarinfo = copy.copy(tarinfo)
451 |             tarinfo.mode = 448  # decimal for oct 0700
452 |         self.extract(tarinfo, path)
453 | 
454 |     # Reverse sort directories.
455 |     if sys.version_info < (2, 4):
456 |         def sorter(dir1, dir2):
457 |             return cmp(dir1.name, dir2.name)
458 |         directories.sort(sorter)
459 |         directories.reverse()
460 |     else:
461 |         directories.sort(key=operator.attrgetter('name'), reverse=True)
462 | 
463 |     # Set correct owner, mtime and filemode on directories.
464 |     for tarinfo in directories:
465 |         dirpath = os.path.join(path, tarinfo.name)
466 |         try:
467 |             self.chown(tarinfo, dirpath)
468 |             self.utime(tarinfo, dirpath)
469 |             self.chmod(tarinfo, dirpath)
470 |         except ExtractError:
471 |             e = sys.exc_info()[1]
472 |             if self.errorlevel > 1:
473 |                 raise
474 |             else:
475 |                 self._dbg(1, "tarfile: %s" % e)
476 | 
477 | 
478 | def main(argv, version=DEFAULT_VERSION):
479 |     """Install or upgrade setuptools and EasyInstall"""
480 |     tarball = download_setuptools(version)  # Honor the requested version.
481 |     _install(tarball)
482 | 
483 | 
484 | if __name__ == '__main__':
485 |     main(sys.argv[1:])
486 | 
--------------------------------------------------------------------------------
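A minimal sketch of the setup.py wiring this bootstrap is meant for, per the
module docstring above; the project metadata is placeholder only:

from distribute_setup import use_setuptools
use_setuptools()  # Ensures distribute/setuptools is importable before setup().

from setuptools import setup

setup(name='example-project',       # Placeholder metadata.
      version='0.1',
      packages=['example_project'])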