39 | <h4>
40 | {title}<a class="headerlink" href="#{section}" title="Permalink to this headline">¶</a>
41 | </h4>
42 | """
43 | link_template = """\
44 | <a href="{link}"><img src="{thumb}" border="0" alt="{basename}"/></a>
45 | """
46 |
47 | toc_template = """\
48 | <li><a class="reference internal" href="#{section}">{title}</a></li>"""
49 |
50 |
51 | def make_thumbnail(args):
52 | image.thumbnail(args[0], args[1], 0.3)
53 |
54 |
55 | def out_of_date(original, derived):
56 | return (not os.path.exists(derived) or
57 | os.stat(derived).st_mtime < os.stat(original).st_mtime)
58 |
59 |
60 | def gen_gallery(app, doctree):
61 | if app.builder.name != 'html':
62 | return
63 |
64 | outdir = app.builder.outdir
65 | rootdir = 'plot_directive/scot_examples'
66 |
67 |     # filter out excluded sections (do not pop from a list while iterating over it)
68 |     example_sections = [(subdir, title) for subdir, title
69 |                         in app.builder.config.mpl_example_sections
70 |                         if subdir not in exclude_example_sections]
71 |
72 | # images we want to skip for the gallery because they are an unusual
73 |     # size that doesn't lay out well in a table, or because they may be
74 | # redundant with other images or uninteresting
75 | skips = set([
76 | 'mathtext_examples',
77 | 'matshow_02',
78 | 'matshow_03',
79 | 'matplotlib_icon',
80 | ])
81 |
82 | thumbnails = {}
83 | rows = []
84 | toc_rows = []
85 |
86 | for subdir, title in example_sections:
87 | rows.append(header_template.format(title=title, section=subdir))
88 | toc_rows.append(toc_template.format(title=title, section=subdir))
89 |
90 | origdir = os.path.join('../build', rootdir, subdir)
91 | thumbdir = os.path.join(outdir, rootdir, subdir, 'thumbnails')
92 | if not os.path.exists(thumbdir):
93 | os.makedirs(thumbdir)
94 |
95 | data = []
96 |
97 | for filename in sorted(glob.glob(os.path.join(origdir, '*.png'))):
98 | if filename.endswith("hires.png"):
99 | continue
100 |
101 | path, filename = os.path.split(filename)
102 | basename, ext = os.path.splitext(filename)
103 | if basename in skips:
104 | continue
105 |
106 |             # Create thumbnails based on images in the build tree, and
107 |             # place them within the build tree as well
108 |             orig_path = str(os.path.join(origdir, filename))
109 |             thumb_path = str(os.path.join(thumbdir, filename))
110 |             if out_of_date(orig_path, thumb_path):
111 | thumbnails[orig_path] = thumb_path
112 |
113 | m = multiimage.match(basename)
114 | if m is not None:
115 | basename = m.group(1)
116 |
117 | data.append((subdir, basename,
118 | os.path.join(rootdir, subdir, 'thumbnails', filename)))
119 |
120 | for (subdir, basename, thumbfile) in data:
121 | if thumbfile is not None:
122 | link = 'examples/%s/%s.html'%(subdir, basename)
123 | rows.append(link_template.format(link=link,
124 | thumb=thumbfile,
125 | basename=basename,
126 | title=basename))
127 |
128 | if len(data) == 0:
129 | warnings.warn("No thumbnails were found in %s" % subdir)
130 |
131 |         # Close out the <div> opened up at the top of this loop
132 |         rows.append("</div>")
133 |
134 | content = gallery_template.format(toc='\n'.join(toc_rows),
135 | gallery='\n'.join(rows))
136 |
137 | # Only write out the file if the contents have actually changed.
138 | # Otherwise, this triggers a full rebuild of the docs
139 |
140 | gallery_path = os.path.join(app.builder.srcdir,
141 | '_templates', 'gallery.html')
142 | if os.path.exists(gallery_path):
143 | fh = open(gallery_path, 'r')
144 | regenerate = fh.read() != content
145 | fh.close()
146 | else:
147 | regenerate = True
148 |
149 | if regenerate:
150 | fh = open(gallery_path, 'w')
151 | fh.write(content)
152 | fh.close()
153 |
154 | for key in thumbnails.keys():
155 | if out_of_date(key, thumbnails[key]):
156 | image.thumbnail(key, thumbnails[key], 0.3)
157 |
158 |
159 | def setup(app):
160 | app.connect('env-updated', gen_gallery)
161 |
162 | try: # multiple plugins may use mpl_example_sections
163 | app.add_config_value('mpl_example_sections', [], True)
164 | except sphinx.errors.ExtensionError:
165 | pass # mpl_example_sections already defined
166 |
--------------------------------------------------------------------------------
/doc/sphinxext/gen_rst.py:
--------------------------------------------------------------------------------
1 | """
2 | Generate the rst files for the examples by iterating over the example scripts.
3 | """
4 | from __future__ import print_function
5 | import io
6 | import os
7 | import re
8 | import sys
9 |
10 | import sphinx.errors
11 |
12 |
13 | exclude_example_sections = ['widgets']
14 | noplot_regex = re.compile(r"#\s*-\*-\s*noplot\s*-\*-")
15 |
16 |
17 | def out_of_date(original, derived):
18 | """
19 |     Returns True if derived is out-of-date wrt original,
20 | both of which are full file paths.
21 |
22 | TODO: this check isn't adequate in some cases. e.g., if we discover
23 | a bug when building the examples, the original and derived will be
24 | unchanged but we still want to force a rebuild.
25 | """
26 | return (not os.path.exists(derived) or
27 | os.stat(derived).st_mtime < os.stat(original).st_mtime)
28 |
29 | def generate_example_rst(app):
30 | rootdir = os.path.join(app.builder.srcdir, 'scot_examples')
31 | rootdir = os.path.abspath(rootdir)
32 | exampledir = os.path.join(app.builder.srcdir, 'examples')
33 | if not os.path.exists(exampledir):
34 | os.makedirs(exampledir)
35 |
36 |     # filter out excluded sections (do not pop from a list while iterating over it)
37 |     example_sections = [(subdir, title) for subdir, title
38 |                         in app.builder.config.mpl_example_sections
39 |                         if subdir not in exclude_example_sections]
40 | example_subdirs, titles = zip(*example_sections)
41 |
42 | datad = {}
43 | for root, subFolders, files in os.walk(rootdir):
44 | for fname in files:
45 | if ( fname.startswith('.') or fname.startswith('#')
46 | or fname.startswith('_') or not fname.endswith('.py') ):
47 | continue
48 |
49 | fullpath = os.path.join(root,fname)
50 | contents = io.open(fullpath, encoding='utf8').read()
51 |             # use the innermost directory name as the section name
52 |             relpath = os.path.split(root)[-1]
53 | datad.setdefault(relpath, []).append((fullpath, fname, contents))
54 |
55 |
56 | subdirs = list(datad.keys())
57 | subdirs.sort()
58 |
59 | fhindex = open(os.path.join(exampledir, 'index.rst'), 'w')
60 | fhindex.write("""\
61 | .. _examples-index:
62 |
63 | ####################
64 | SCoT Examples
65 | ####################
66 |
67 |
68 | :Release: |version|
69 | :Date: |today|
70 |
71 | .. toctree::
72 | :maxdepth: 2
73 |
74 | """)
75 |
76 | for subdir in subdirs:
77 | rstdir = os.path.join(exampledir, subdir)
78 | if not os.path.exists(rstdir):
79 | os.makedirs(rstdir)
80 |
81 | outputdir = os.path.join(app.builder.outdir, 'examples')
82 | if not os.path.exists(outputdir):
83 | os.makedirs(outputdir)
84 |
85 | outputdir = os.path.join(outputdir, subdir)
86 | if not os.path.exists(outputdir):
87 | os.makedirs(outputdir)
88 |
89 | subdirIndexFile = os.path.join(rstdir, 'index.rst')
90 | fhsubdirIndex = open(subdirIndexFile, 'w')
91 | fhindex.write(' %s/index.rst\n\n'%subdir)
92 |
93 | fhsubdirIndex.write("""\
94 | .. _%s-examples-index:
95 |
96 | ##############################################
97 | %s Examples
98 | ##############################################
99 |
100 |
101 | :Release: |version|
102 | :Date: |today|
103 |
104 | .. toctree::
105 | :maxdepth: 1
106 |
107 | """%(subdir, subdir))
108 |
109 | sys.stdout.write(subdir + ", ")
110 | sys.stdout.flush()
111 |
112 | data = datad[subdir]
113 | data.sort()
114 |
115 | for fullpath, fname, contents in data:
116 | basename, ext = os.path.splitext(fname)
117 | outputfile = os.path.join(outputdir, fname)
118 | #thumbfile = os.path.join(thumb_dir, '%s.png'%basename)
119 | #print ' static_dir=%s, basename=%s, fullpath=%s, fname=%s, thumb_dir=%s, thumbfile=%s'%(static_dir, basename, fullpath, fname, thumb_dir, thumbfile)
120 |
121 | rstfile = '%s.rst'%basename
122 | outrstfile = os.path.join(rstdir, rstfile)
123 |
124 | # XXX: We might consider putting extra metadata in the example
125 | # files to include a title. If so, this line is where we would add
126 | # this information.
127 | fhsubdirIndex.write(' %s <%s>\n'%(os.path.basename(basename),rstfile))
128 |
129 | do_plot = (subdir in example_subdirs
130 | and not noplot_regex.search(contents))
131 |
132 | if not do_plot:
133 | fhstatic = io.open(outputfile, 'w', encoding='utf-8')
134 | fhstatic.write(contents)
135 | fhstatic.close()
136 |
137 | if not out_of_date(fullpath, outrstfile):
138 | continue
139 |
140 | fh = io.open(outrstfile, 'w', encoding='utf-8')
141 | fh.write(u'.. _%s-%s:\n\n' % (subdir, basename))
142 | title = '%s example code: %s'%(subdir, fname)
143 |             #title = '<img src=%s> %s example code: %s'%(thumbfile, subdir, fname)
144 |
145 | fh.write(title + u'\n')
146 | fh.write(u'=' * len(title) + u'\n\n')
147 |
148 | if do_plot:
149 | fh.write(u"\n\n.. plot:: %s\n\n::\n\n" % fullpath)
150 | else:
151 | fh.write(u"[`source code <%s>`_]\n\n::\n\n" % fname)
152 |
153 | # indent the contents
154 | contents = u'\n'.join([u' %s'%row.rstrip() for row in contents.split(u'\n')])
155 | fh.write(contents)
156 |
157 | #fh.write(u'\n\nKeywords: python, matplotlib, pylab, example, codex (see :ref:`how-to-search-examples`)')
158 | fh.close()
159 |
160 | fhsubdirIndex.close()
161 |
162 | fhindex.close()
163 |
164 | print()
165 |
166 | def setup(app):
167 | app.connect('builder-inited', generate_example_rst)
168 |
169 | try: # multiple plugins may use mpl_example_sections
170 | app.add_config_value('mpl_example_sections', [], True)
171 | except sphinx.errors.ExtensionError:
172 | pass # mpl_example_sections already defined
173 |
--------------------------------------------------------------------------------
/doc/sphinxext/ipython_console_highlighting.py:
--------------------------------------------------------------------------------
1 | """reST directive for syntax-highlighting ipython interactive sessions.
2 |
3 | XXX - See what improvements can be made based on the new (as of Sept 2009)
4 | 'pycon' lexer for the python console. At the very least it will give better
5 | highlighted tracebacks.
6 | """
7 |
8 | #-----------------------------------------------------------------------------
9 | # Needed modules
10 |
11 | # Standard library
12 | import re
13 |
14 | # Third party
15 | from pygments.lexer import Lexer, do_insertions
16 | from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
17 | PythonTracebackLexer)
18 | from pygments.token import Comment, Generic
19 |
20 | from sphinx import highlighting
21 |
22 | #-----------------------------------------------------------------------------
23 | # Global constants
24 | line_re = re.compile('.*?\n')
25 |
26 | #-----------------------------------------------------------------------------
27 | # Code begins - classes and functions
28 |
29 | class IPythonConsoleLexer(Lexer):
30 | """
31 | For IPython console output or doctests, such as:
32 |
33 | .. sourcecode:: ipython
34 |
35 | In [1]: a = 'foo'
36 |
37 | In [2]: a
38 | Out[2]: 'foo'
39 |
40 | In [3]: print a
41 | foo
42 |
43 | In [4]: 1 / 0
44 |
45 | Notes:
46 |
47 | - Tracebacks are not currently supported.
48 |
49 | - It assumes the default IPython prompts, not customized ones.
50 | """
51 |
52 | name = 'IPython console session'
53 | aliases = ['ipython']
54 | mimetypes = ['text/x-ipython-console']
55 |     input_prompt = re.compile(r"(In \[[0-9]+\]: )|( \.\.\.+:)")
56 |     output_prompt = re.compile(r"(Out\[[0-9]+\]: )|( \.\.\.+:)")
57 |     continue_prompt = re.compile(r" \.\.\.+:")
58 |     tb_start = re.compile(r"\-+")
59 |
60 | def get_tokens_unprocessed(self, text):
61 | pylexer = PythonLexer(**self.options)
62 | tblexer = PythonTracebackLexer(**self.options)
63 |
64 | curcode = ''
65 | insertions = []
66 | for match in line_re.finditer(text):
67 | line = match.group()
68 | input_prompt = self.input_prompt.match(line)
69 | continue_prompt = self.continue_prompt.match(line.rstrip())
70 | output_prompt = self.output_prompt.match(line)
71 | if line.startswith("#"):
72 | insertions.append((len(curcode),
73 | [(0, Comment, line)]))
74 | elif input_prompt is not None:
75 | insertions.append((len(curcode),
76 | [(0, Generic.Prompt, input_prompt.group())]))
77 | curcode += line[input_prompt.end():]
78 | elif continue_prompt is not None:
79 | insertions.append((len(curcode),
80 | [(0, Generic.Prompt, continue_prompt.group())]))
81 | curcode += line[continue_prompt.end():]
82 | elif output_prompt is not None:
83 | # Use the 'error' token for output. We should probably make
84 |                 # our own token, but error is typically in a bright color like
85 | # red, so it works fine for our output prompts.
86 | insertions.append((len(curcode),
87 | [(0, Generic.Error, output_prompt.group())]))
88 | curcode += line[output_prompt.end():]
89 | else:
90 | if curcode:
91 | for item in do_insertions(insertions,
92 | pylexer.get_tokens_unprocessed(curcode)):
93 | yield item
94 | curcode = ''
95 | insertions = []
96 | yield match.start(), Generic.Output, line
97 | if curcode:
98 | for item in do_insertions(insertions,
99 | pylexer.get_tokens_unprocessed(curcode)):
100 | yield item
101 |
102 |
103 | def setup(app):
104 | """Setup as a sphinx extension."""
105 |
106 | # This is only a lexer, so adding it below to pygments appears sufficient.
107 | # But if somebody knows that the right API usage should be to do that via
108 | # sphinx, by all means fix it here. At least having this setup() function
109 | # suppresses the sphinx warning we'd get without it.
110 | pass
111 |
112 | #-----------------------------------------------------------------------------
113 | # Register the extension as a valid pygments lexer
114 | highlighting.lexers['ipython'] = IPythonConsoleLexer()
115 |
--------------------------------------------------------------------------------
/examples/README.txt:
--------------------------------------------------------------------------------
1 | Examples
2 | --------
3 |
4 |
5 |
--------------------------------------------------------------------------------
/examples/misc/MVARICAvsCSPVARICA.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | """
6 | This example shows how to decompose motor imagery EEG into sources using
7 | CSPVARICA and visualize a connectivity measure.
8 | """
9 |
10 | import numpy as np
11 |
12 | import scot
13 |
14 | # The data set contains a continuous 45 channel EEG recording of a motor
15 | # imagery experiment. The data was preprocessed to reduce eye movement
16 | # artifacts and resampled to a sampling rate of 100 Hz. With a visual cue, the
17 | # subject was instructed to perform either hand or foot motor imagery. The
18 | # trigger time points of the cues are stored in 'triggers', and 'classes'
19 | # contains the class labels. Duration of the motor imagery period was
20 | # approximately six seconds.
21 | from scot.datasets import fetch
22 |
23 |
24 | midata = fetch("mi")[0]
25 |
26 | raweeg = midata["eeg"]
27 | triggers = midata["triggers"]
28 | classes = midata["labels"]
29 | fs = midata["fs"]
30 | locs = midata["locations"]
31 |
32 |
33 | # Set random seed for repeatable results
34 | np.random.seed(42)
35 |
36 |
37 | # Prepare data
38 | #
39 | # Here we cut out segments from 3s to 4s after each trigger. This is right in
40 | # the middle of the motor imagery period.
41 | data = scot.datatools.cut_segments(raweeg, triggers, 3 * fs, 4 * fs)
42 |
43 |
44 | # Set up analysis object
45 | #
46 | # We simply choose a VAR model order of 30, and reduction to 4 components.
47 | ws = scot.Workspace({'model_order': 30}, reducedim=4, fs=fs, locations=locs)
48 |
49 | # Configure plotting options
50 | ws.plot_f_range = [0, 30] # only show 0-30 Hz
51 | ws.plot_diagonal = 'S' # put spectral density plots on the diagonal
52 | ws.plot_outside_topo = True # plot topos above and to the left
53 |
54 | # Perform MVARICA
55 | ws.set_data(data, classes)
56 | ws.do_mvarica()
57 | fig1 = ws.plot_connectivity_topos()
58 | ws.set_used_labels(['foot'])
59 | ws.fit_var()
60 | ws.get_connectivity('ffDTF', fig1)
61 | ws.set_used_labels(['hand'])
62 | ws.fit_var()
63 | ws.get_connectivity('ffDTF', fig1)
64 | fig1.suptitle('MVARICA')
65 |
66 | # Perform CSPVARICA
67 | ws.set_data(data, classes)
68 | ws.do_cspvarica()
69 | fig2 = ws.plot_connectivity_topos()
70 | ws.set_used_labels(['foot'])
71 | ws.fit_var()
72 | ws.get_connectivity('ffDTF', fig2)
73 | ws.set_used_labels(['hand'])
74 | ws.fit_var()
75 | ws.get_connectivity('ffDTF', fig2)
76 | fig2.suptitle('CSPVARICA')
77 |
78 | ws.show_plots()
79 |
--------------------------------------------------------------------------------
/examples/misc/circular.py:
--------------------------------------------------------------------------------
1 | """
2 | This example shows how to decompose EEG signals into source activations with
3 | CSPVARICA and visualize connectivity.
4 | """
5 |
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 |
9 | import scot
10 | from scot.utils import cuthill_mckee
11 | from scot.eegtopo.topoplot import Topoplot
12 | from scot import plotting
13 |
14 |
15 | # The data set contains a continuous 45 channel EEG recording of a motor
16 | # imagery experiment. The data was preprocessed to reduce eye movement
17 | # artifacts and resampled to a sampling rate of 100 Hz. With a visual cue, the
18 | # subject was instructed to perform either hand or foot motor imagery. The
19 | # trigger time points of the cues are stored in 'triggers', and 'classes'
20 | # contains the class labels. Duration of the motor imagery period was
21 | # approximately six seconds.
22 | from scot.datasets import fetch
23 |
24 |
25 | midata = fetch("mi")[0]
26 |
27 | raweeg = midata["eeg"]
28 | triggers = midata["triggers"]
29 | classes = midata["labels"]
30 | fs = midata["fs"]
31 | locs = midata["locations"]
32 |
33 |
34 | # Set random seed for repeatable results
35 | np.random.seed(42)
36 |
37 |
38 | # Prepare data
39 | #
40 | # Here we cut out segments from 2s to 5s after each trigger. This is right in
41 | # the middle of the motor imagery period.
42 | data = scot.datatools.cut_segments(raweeg, triggers, 2 * fs, 5 * fs)
43 |
44 |
45 | # Set up analysis object
46 | #
47 | # We simply choose a VAR model order of 30, and reduction to 15 components.
48 | ws = scot.Workspace({'model_order': 30}, reducedim=15, fs=fs, locations=locs)
49 |
50 |
51 | # Perform CSPVARICA
52 | ws.set_data(data, classes)
53 | ws.do_cspvarica()
54 |
55 |
56 | # Connectivity analysis
57 | #
58 | # Extract the full frequency directed transfer function (ffDTF) from the
59 | # activations of each class and calculate the average value over the alpha band
60 | # (8-12 Hz).
61 |
62 | freq = np.linspace(0, fs, ws.nfft_)
63 | alpha = {}
64 | for c in np.unique(classes):
65 | ws.set_used_labels([c])
66 | ws.fit_var()
67 | con = ws.get_connectivity('ffDTF')
68 | alpha[c] = np.mean(con[:, :, np.logical_and(8 < freq, freq < 12)], axis=2)
69 |
70 | # Prepare topography plots
71 | topo = Topoplot()
72 | topo.set_locations(locs)
73 | mixmaps = plotting.prepare_topoplots(topo, ws.mixing_)
74 |
75 | order = None
76 | for cls in ['hand', 'foot']:
77 |     # force diagonal (self-connectivity) to 0
78 |     np.fill_diagonal(alpha[cls], 0)
82 |
83 | w = alpha[cls]
84 | m = alpha[cls] > 4
85 |
86 | # use same ordering of components for each class
87 |     if order is None:
88 | order = cuthill_mckee(m)
89 |
90 | # fixed color, but alpha varies with connectivity strength
91 | r = np.ones(w.shape)
92 | g = np.zeros(w.shape)
93 | b = np.zeros(w.shape)
94 | a = (alpha[cls]-4) / max(np.max(alpha['hand']-4), np.max(alpha['foot']-4))
95 | c = np.dstack([r, g, b, a])
96 |
97 | plotting.plot_circular(colors=c, widths=w, mask=m, topo=topo,
98 | topomaps=mixmaps, order=order)
99 | plt.title(cls)
100 |
101 | plotting.show_plots()
102 |
--------------------------------------------------------------------------------
/examples/misc/connectivity.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | """
6 | This example shows how to decompose EEG signals into source activations with
7 | CSPVARICA, and visualize a connectivity measure.
8 | """
9 |
10 | import numpy as np
11 |
12 | import scot
13 |
14 | # The data set contains a continuous 45 channel EEG recording of a motor
15 | # imagery experiment. The data was preprocessed to reduce eye movement
16 | # artifacts and resampled to a sampling rate of 100 Hz. With a visual cue, the
17 | # subject was instructed to perform either hand or foot motor imagery. The
18 | # trigger time points of the cues are stored in 'triggers', and 'classes'
19 | # contains the class labels. Duration of the motor imagery period was
20 | # approximately six seconds.
21 | from scot.datasets import fetch
22 |
23 |
24 | midata = fetch("mi")[0]
25 |
26 | raweeg = midata["eeg"]
27 | triggers = midata["triggers"]
28 | classes = midata["labels"]
29 | fs = midata["fs"]
30 | locs = midata["locations"]
31 |
32 |
33 | # Set random seed for repeatable results
34 | np.random.seed(42)
35 |
36 |
37 | # Prepare data
38 | #
39 | # Here we cut out segments from 3s to 4s after each trigger. This is right in
40 | # the middle of the motor imagery period.
41 | data = scot.datatools.cut_segments(raweeg, triggers, 3 * fs, 4 * fs)
42 |
43 |
44 | # Set up analysis object
45 | #
46 | # We simply choose a VAR model order of 40, and reduction to 4 components
47 | # (that's not a lot).
48 | ws = scot.Workspace({'model_order': 40}, reducedim=4, fs=fs, locations=locs)
49 |
50 |
51 | # Perform CSPVARICA and plot components
52 | ws.set_data(data, classes)
53 | ws.do_cspvarica()
54 |
55 | p = ws.var_.test_whiteness(50)
56 | print('Whiteness:', p)
57 |
58 | # Configure plotting options
59 | ws.plot_f_range = [0, 30] # only show 0-30 Hz
60 | ws.plot_diagonal = 'S' # put spectral density plots on the diagonal
61 | ws.plot_outside_topo = True # plot topos above and to the left
62 |
63 | fig = ws.plot_connectivity_topos()
64 |
65 |
66 | # Connectivity analysis
67 | #
68 | # Extract the full frequency directed transfer function (ffDTF) from the
69 | # activations of each class and plot them.
70 | ws.set_used_labels(['foot'])
71 | ws.fit_var()
72 | ws.get_connectivity('ffDTF', fig)
73 |
74 | ws.set_used_labels(['hand'])
75 | ws.fit_var()
76 | ws.get_connectivity('ffDTF', fig)
77 |
78 | fig.suptitle('CSPVARICA')
79 |
80 | ws.show_plots()
81 |
--------------------------------------------------------------------------------
/examples/misc/features.py:
--------------------------------------------------------------------------------
1 | """
2 | This example shows how to decompose EEG signals into source activations with
3 | CSPVARICA, and subsequently extract single-trial connectivity as features for
4 | LDA classification.
5 | """
6 |
7 | from __future__ import print_function
8 |
9 | import numpy as np
10 | try: # new in sklearn 0.19
11 | from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
12 | except ImportError:
13 | from sklearn.lda import LDA
14 | from sklearn.model_selection import KFold
15 | from sklearn.metrics import confusion_matrix
16 |
17 | import scot.xvschema
18 |
19 | # The data set contains a continuous 45 channel EEG recording of a motor
20 | # imagery experiment. The data was preprocessed to reduce eye movement
21 | # artifacts and resampled to a sampling rate of 100 Hz. With a visual cue, the
22 | # subject was instructed to perform either hand or foot motor imagery. The
23 | # trigger time points of the cues are stored in 'triggers', and 'classes'
24 | # contains the class labels. Duration of the motor imagery period was
25 | # approximately six seconds.
26 | from scot.datasets import fetch
27 |
28 |
29 | midata = fetch("mi")[0]
30 |
31 | raweeg = midata["eeg"]
32 | triggers = midata["triggers"]
33 | classes = midata["labels"]
34 | fs = midata["fs"]
35 | locs = midata["locations"]
36 |
37 |
38 | # Set random seed for repeatable results
39 | np.random.seed(42)
40 |
41 |
42 | # Switch backend to scikit-learn
43 | scot.backend.activate('sklearn')
44 |
45 |
46 | # Set up analysis object
47 | #
48 | # We simply choose a VAR model order of 30, and reduction to 4 components.
49 | ws = scot.Workspace({'model_order': 30}, reducedim=4, fs=fs)
50 | freq = np.linspace(0, fs, ws.nfft_)
51 |
52 |
53 | # Prepare data
54 | #
55 | # Here we cut out segments from 3s to 4s after each trigger. This is right in
56 | # the middle of the motor imagery period.
57 | data = scot.datatools.cut_segments(raweeg, triggers, 3 * fs, 4 * fs)
58 |
59 | # Initialize cross-validation
60 | nfolds = 10
61 | kf = KFold(n_splits=nfolds)
62 |
63 | # LDA requires numeric class labels
64 | cl = np.unique(classes)
65 | classids = np.array([dict(zip(cl, range(len(cl))))[c] for c in classes])
66 |
67 | # Perform cross-validation
68 | lda = LDA()
69 | cm = np.zeros((2, 2))
70 | fold = 0
71 | for train, test in kf.split(data):
72 | fold += 1
73 |
74 | # Perform CSPVARICA
75 | ws.set_data(data[train, :, :], classes[train])
76 | ws.do_cspvarica()
77 |
78 | # Find optimal regularization parameter for single-trial fitting
79 | # ws.var_.xvschema = scot.xvschema.singletrial
80 | # ws.optimize_var()
81 | ws.var_.delta = 1
82 |
83 | # Single-trial fitting and feature extraction
84 |     features = np.zeros((len(triggers), 32))  # 2 bands x (4 x 4) connectivity values
85 | for t in range(len(triggers)):
86 | print('Fold {:2d}/{:2d}, trial: {:d} '.format(fold, nfolds, t),
87 | end='\r')
88 | ws.set_data(data[t, :, :])
89 | ws.fit_var()
90 |
91 | con = ws.get_connectivity('ffPDC')
92 |
93 | alpha = np.mean(con[:, :, np.logical_and(7 < freq, freq < 13)], axis=2)
94 | beta = np.mean(con[:, :, np.logical_and(15 < freq, freq < 25)], axis=2)
95 |
96 | features[t, :] = np.array([alpha, beta]).flatten()
97 |
98 | lda.fit(features[train, :], classids[train])
99 |
100 | acc_train = lda.score(features[train, :], classids[train])
101 | acc_test = lda.score(features[test, :], classids[test])
102 |
103 | print('Fold {:2d}/{:2d}, '
104 | 'acc train: {:.3f}, '
105 | 'acc test: {:.3f}'.format(fold, nfolds, acc_train, acc_test))
106 |
107 | pred = lda.predict(features[test, :])
108 | cm += confusion_matrix(classids[test], pred)
109 |
110 | print('\nConfusion Matrix:\n', cm)
111 | print('\nTotal Accuracy: {:.3f}'.format(np.sum(np.diag(cm))/np.sum(cm)))
112 |
--------------------------------------------------------------------------------
/examples/misc/parallelization.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | """
6 | This example shows how to parallelize certain computations in SCoT.
7 | """
8 |
9 | import numpy as np
10 | import time
11 |
12 | from scot.datatools import cut_segments
13 | from scot.var import VAR
14 |
15 |
16 | # The data set contains a continuous 45 channel EEG recording of a motor
17 | # imagery experiment. The data was preprocessed to reduce eye movement
18 | # artifacts and resampled to a sampling rate of 100 Hz. With a visual cue, the
19 | # subject was instructed to perform either hand or foot motor imagery. The
20 | # trigger time points of the cues are stored in 'triggers', and 'classes'
21 | # contains the class labels. Duration of the motor imagery period was
22 | # approximately six seconds.
23 | from scot.datasets import fetch
24 |
25 |
26 | # Prevent execution of the main script in worker processes
27 | if __name__ == "__main__":
28 |
29 | midata = fetch("mi")[0]
30 |
31 | raweeg = midata["eeg"]
32 | triggers = midata["triggers"]
33 | classes = midata["labels"]
34 | fs = midata["fs"]
35 | locs = midata["locations"]
36 |
37 | # Prepare data
38 | #
39 | # Here we cut out segments from 3s to 4s after each trigger. This is right
40 | # in the middle of the motor imagery period.
41 | data = cut_segments(raweeg, triggers, 3 * fs, 4 * fs)
42 |
43 | # only use every 10th trial to make the example run faster
44 | data = data[::10]
45 |
46 | var = VAR(model_order=5)
47 | var.fit(data)
48 | for n_jobs in [-1, None, 1, 2, 3, 4, 5, 6, 7, 8]:
49 | # Set random seed for repeatable results
50 | np.random.seed(42)
51 | var.n_jobs = n_jobs
52 | start = time.perf_counter()
53 | p = var.test_whiteness(10, repeats=1000)
54 | time1 = time.perf_counter()
55 | print('n_jobs: {:>4s}, whiteness test: {:.2f}s, p = {}'.format(str(n_jobs), time1 - start, p))
56 |
--------------------------------------------------------------------------------
/examples/misc/pca.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2016 SCoT Development Team
4 |
5 | """This example demonstrates that it is possible to reconstruct sources even if
6 | we include PCA in the process.
7 | """
8 |
9 | from __future__ import print_function
10 |
11 | import numpy as np
12 |
13 | from scot.pca import pca
14 | from scot.var import VAR
15 |
16 |
17 | # Set random seed for repeatable results
18 | np.random.seed(42)
19 |
20 | # Generate data from a VAR(1) process
21 | model0 = VAR(1)
22 | model0.coef = np.array([[0.3, -0.6], [0, -0.9]])
23 | x = model0.simulate(10000).squeeze()
24 |
25 | # Transform data with PCA
26 | w, v = pca(x)
27 | y = np.dot(w.T, x)
28 |
29 | # Verify that transformed data y is decorrelated
30 | print('Covariance of x:\n', np.cov(x.squeeze()))
31 | print('\nCovariance of y:\n', np.cov(y.squeeze()))
32 |
33 | model1, model2 = VAR(1), VAR(1)
34 |
35 | # Fit model1 to the original data
36 | model1.fit(x)
37 |
38 | # Fit model2 to the PCA transformed data
39 | model2.fit(y)
40 |
41 | # The coefficients estimated on x (2) are exactly equal to the back-transformed
42 | # coefficients estimated on y (4)
43 | print('\n(1) True VAR coefficients:\n', model0.coef)
44 | print('\n(2) VAR coefficients estimated on x:\n', model1.coef)
45 | print('\n(3) VAR coefficients estimated on y:\n', model2.coef)
46 | print('\n(4) VAR coefficients estimated on y and transformed back:\n',
47 | w.dot(model2.coef).dot(w.T))
48 |
49 | print('\n(5) Check if (2) and (4) are equal:\n',
50 | np.isclose(model1.coef, w.dot(model2.coef).dot(w.T)))
51 |
--------------------------------------------------------------------------------
/examples/misc/statistics.py:
--------------------------------------------------------------------------------
1 | """
2 | This example shows how to create surrogate connectivity to determine if
3 | connectivity is statistically significant.
4 | """
5 |
6 | import numpy as np
7 |
8 | import scot
10 |
11 | # The data set contains a continuous 45 channel EEG recording of a motor
12 | # imagery experiment. The data was preprocessed to reduce eye movement
13 | # artifacts and resampled to a sampling rate of 100 Hz. With a visual cue, the
14 | # subject was instructed to perform either hand or foot motor imagery. The
15 | # trigger time points of the cues are stored in 'triggers', and 'classes'
16 | # contains the class labels. Duration of the motor imagery period was
17 | # approximately six seconds.
18 | from scot.datasets import fetch
19 |
20 |
21 | midata = fetch("mi")[0]
22 |
23 | raweeg = midata["eeg"]
24 | triggers = midata["triggers"]
25 | classes = midata["labels"]
26 | fs = midata["fs"]
27 | locs = midata["locations"]
28 |
29 |
30 | # Set random seed for repeatable results
31 | np.random.seed(42)
32 |
33 |
34 | # Prepare data
35 | #
36 | # Here we cut out segments from 3s to 4s after each trigger. This is right in
37 | # the middle of the motor imagery period.
38 | data = scot.datatools.cut_segments(raweeg, triggers, 3 * fs, 4 * fs)
39 |
40 |
41 | # Set up analysis object
42 | #
43 | # We choose a VAR model order of 35, and reduction to 4 components.
44 | ws = scot.Workspace({'model_order': 35}, reducedim=4, fs=fs, locations=locs)
45 |
46 | fig = None
47 |
48 | # Perform MVARICA and plot components
49 | ws.set_data(data, classes)
50 | ws.do_mvarica(varfit='class')
51 |
52 | p = ws.var_.test_whiteness(50)
53 | print('Whiteness:', p)
54 |
55 | fig = ws.plot_connectivity_topos(fig=fig)
56 |
57 | p, s, _ = ws.compare_conditions(['hand'], ['foot'], 'ffDTF', repeats=100,
58 | plot=fig)
59 |
60 | print(p)
61 | ws.show_plots()
62 |
--------------------------------------------------------------------------------
/examples/misc/timefrequency.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | """
6 | This example shows how to decompose EEG signals into source activations with
7 | CSPVARICA, and visualize time-varying connectivity.
8 | """
9 |
10 | import numpy as np
11 |
12 | import scot
13 |
14 | # The data set contains a continuous 45 channel EEG recording of a motor
15 | # imagery experiment. The data was preprocessed to reduce eye movement
16 | # artifacts and resampled to a sampling rate of 100 Hz. With a visual cue, the
17 | # subject was instructed to perform either hand or foot motor imagery. The
18 | # trigger time points of the cues are stored in 'triggers', and 'classes'
19 | # contains the class labels. Duration of the motor imagery period was
20 | # approximately six seconds.
21 | from scot.datasets import fetch
22 |
23 |
24 | midata = fetch("mi")[0]
25 |
26 | raweeg = midata["eeg"]
27 | triggers = midata["triggers"]
28 | classes = midata["labels"]
29 | fs = midata["fs"]
30 | locs = midata["locations"]
31 |
32 |
33 | # Set random seed for repeatable results
34 | np.random.seed(42)
35 |
36 |
37 | # Set up analysis object
38 | #
39 | # We simply choose a VAR model order of 40, and reduction to 4 components.
40 | ws = scot.Workspace({'model_order': 40}, reducedim=4, fs=fs, locations=locs)
41 |
42 |
43 | # Prepare data
44 | #
45 | # Here we cut out segments from 3s to 4s after each trigger. This is right in
46 | # the middle of the motor imagery period.
47 | data = scot.datatools.cut_segments(raweeg, triggers, 3 * fs, 4 * fs)
48 |
49 |
50 | # Perform CSPVARICA
51 | ws.set_data(data, classes)
52 | ws.do_cspvarica()
53 |
54 | p = ws.var_.test_whiteness(50)
55 | print('Whiteness:', p)
56 |
57 |
58 | # Prepare data
59 | #
60 | # Here we cut out segments from -2s to 8s around each trigger. This covers the
61 | # whole trial.
62 | data = scot.datatools.cut_segments(raweeg, triggers, -2 * fs, 8 * fs)
63 |
64 |
65 | # Configure plotting options
66 | ws.plot_f_range = [0, 30] # only show 0-30 Hz
67 | ws.plot_diagonal = 'topo' # put topo plots on the diagonal
68 | ws.plot_outside_topo = False # no topo plots above and to the left
69 |
70 |
71 | # Connectivity analysis
72 | #
73 | # Extract the full frequency directed transfer function (ffDTF) from the
74 | # activations of each class and plot them.
75 | ws.set_data(data, classes, time_offset=-1)
76 |
77 | fig = ws.plot_connectivity_topos()
78 | ws.set_used_labels(['hand'])
79 | ws.get_tf_connectivity('ffDTF', 1 * fs, int(0.2 * fs), plot=fig,
80 | crange=[0, 30])
81 | fig.suptitle('Hand')
82 |
83 | fig = ws.plot_connectivity_topos()
84 | ws.set_used_labels(['foot'])
85 | ws.get_tf_connectivity('ffDTF', 1 * fs, int(0.2 * fs), plot=fig,
86 | crange=[0, 30])
87 | fig.suptitle('Foot')
88 |
89 | ws.show_plots()
90 |
--------------------------------------------------------------------------------
/examples/misc/validation.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | """
6 | This example shows how to decompose EEG signals into source activations with
7 | CSPVARICA, and validate the fitted VAR models by testing residuals for whiteness.
8 | """
9 |
10 | import numpy as np
11 |
12 | import matplotlib.pyplot as plt
13 |
14 | import scot
15 | from scot.varica import cspvarica
16 | from scot.datatools import cut_segments
17 | import scot.plotting as splot
18 |
19 |
20 | # The data set contains a continuous 45 channel EEG recording of a motor
21 | # imagery experiment. The data was preprocessed to reduce eye movement
22 | # artifacts and resampled to a sampling rate of 100 Hz. With a visual cue, the
23 | # subject was instructed to perform either hand or foot motor imagery. The
24 | # trigger time points of the cues are stored in 'triggers', and 'classes'
25 | # contains the class labels. Duration of the motor imagery period was
26 | # approximately six seconds.
27 | from scot.datasets import fetch
28 |
29 |
30 | midata = fetch("mi")[0]
31 |
32 | raweeg = midata["eeg"]
33 | triggers = midata["triggers"]
34 | classes = midata["labels"]
35 | fs = midata["fs"]
36 | locs = midata["locations"]
37 |
38 |
39 | # Set random seed for repeatable results
40 | np.random.seed(42)
41 |
42 |
43 | # Prepare data
44 | #
45 | # Here we cut out segments from 3s to 4s after each trigger. This is right in
46 | # the middle of the motor imagery period.
47 | data = cut_segments(raweeg, triggers, 3 * fs, 4 * fs)
48 |
49 | m = 4 # number of sources to estimate
50 | h = 66 # number of lags for whiteness test
51 |
52 | i = 0
53 | for p in [22, 33]:
54 | i += 1
55 | print('Model order:', p)
56 |
57 | print(' Performing CSPVARICA')
58 | var = scot.backend['var'](p)
59 | result = cspvarica(data, var, classes, m)
60 |
61 | if result.a.is_stable():
62 | s = ''
63 | else:
64 | s = '*NOT* '
65 | print(' VAR model is {}stable.'.format(s))
66 |
67 | # discard the first p residuals
68 | # r = result.var_residuals[p:, :, :]
69 |
70 | print(' Testing VAR residuals for whiteness up to lag', h)
71 | pr = splot.plot_whiteness(result.a, h, repeats=100,
72 | axis=plt.subplot(2, 1, i))
73 |
74 | if pr < 0.05:
75 | plt.gca().set_title('model order {}: residuals significantly '
76 | 'non-white with p={:f}'.format(p, pr))
77 | else:
78 | plt.gca().set_title('model order {}: residuals white '
79 | 'with p={:f}'.format(p, pr))
80 |
81 | splot.show_plots()
82 |
--------------------------------------------------------------------------------
/examples/test/plot_test.py:
--------------------------------------------------------------------------------
1 | """
2 | ==========================
3 | Testing automatic examples
4 | ==========================
5 |
6 | This will produce a simple image.
7 | """
8 |
9 | import numpy as np
10 | import matplotlib.pyplot as plt
11 |
12 | np.random.seed(42)
13 |
14 | plt.plot(np.random.randn(1000))
15 | plt.show()
16 |
--------------------------------------------------------------------------------
/examples/test/premixing.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | """
6 | This example shows how to set the premixing matrix to tell the workspace about
7 | pre-transformed data.
8 | """
9 |
10 | import numpy as np
11 | import matplotlib.pyplot as plt
12 |
13 | import scot
14 |
15 |
16 | # The example data set contains a continuous 45 channel EEG recording of a motor
17 | # imagery experiment. The data was preprocessed to reduce eye movement artifacts
18 | # and resampled to a sampling rate of 100 Hz.
19 | # With a visual cue, the subject was instructed to perform either hand or foot
20 | # motor imagery. The trigger time points of the cues are stored in 'triggers',
21 | # and 'labels' contains the class labels ('hand' or 'foot'). Duration of the
22 | # motor imagery period was approximately six seconds.
23 | from scot.datasets import fetch
24 |
25 |
26 | midata = fetch("mi")[0]
27 |
28 | raweeg = midata["eeg"]
29 | triggers = midata["triggers"]
30 | classes = midata["labels"]
31 | fs = midata["fs"]
32 | locs = midata["locations"]
33 |
34 |
35 | # Prepare the data
36 | #
37 | # Here we cut segments from 3s to 4s following each trigger out of the EEG. This
38 | # is right in the middle of the motor imagery period.
39 | data = scot.datatools.cut_segments(raweeg, triggers, 3 * fs, 4 * fs)
40 |
41 | # common average reference
42 | data -= np.mean(data, axis=1, keepdims=True)
43 |
44 | # pre-transform data with a PCA
45 | myunmix, mymix, data = scot.backend['pca'](data, 0.99)
46 |
47 | print('Remaining data components:', data.shape[1])
48 | print('Note that the Topoplots still map to all 45 EEG channels.')
49 |
50 | ws = scot.Workspace({'model_order': 5}, reducedim=4, fs=fs, locations=locs)
51 |
52 | # Perform CSPVARICA and plot the components
53 | ws.set_data(data, classes)
54 | ws.do_cspvarica(varfit='trial')
55 |
56 | ws.set_premixing(mymix)
57 |
58 | ws.plot_source_topos()
59 | plt.show()
60 |
--------------------------------------------------------------------------------
/run_tests2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | nosetests2 --with-coverage --cover-package=scot,eegtopo --cover-erase --cover-inclusive --cover-branches -x -v
4 |
5 |
--------------------------------------------------------------------------------
/run_tests3.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | nosetests3 --with-coverage --cover-package=scot,eegtopo --cover-erase --cover-inclusive --cover-branches -x -v
4 |
5 |
--------------------------------------------------------------------------------
/scot/__init__.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2016 SCoT Development Team
4 |
5 | """SCoT: The Source Connectivity Toolbox."""
6 |
7 | from __future__ import absolute_import
8 |
9 | from . import config
10 | config = config.load_configuration()
11 |
12 | from .backendmanager import BackendManager
13 | backend = BackendManager()
14 |
15 | # register backends shipped with SCoT
16 | from . import backend_builtin
17 | from . import backend_sklearn
18 | from . import backend_mne
19 | backend.activate(config.get('scot', 'backend'))
20 |
21 | from .ooapi import Workspace
22 |
23 | from .connectivity import Connectivity
24 |
25 | from . import datatools, datasets
26 |
27 | __all__ = ['Workspace', 'Connectivity', 'datatools']
28 |
29 | __version__ = "0.3.dev0"
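
# A minimal usage sketch (see the examples directory for complete scripts;
# 'data' and 'classes' stand for user-provided EEG trials and labels):
#
#     import scot
#     ws = scot.Workspace({'model_order': 30}, reducedim=4, fs=100)
#     ws.set_data(data, classes)
#     ws.do_cspvarica()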
30 |
--------------------------------------------------------------------------------
/scot/backend_builtin.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2016 SCoT Development Team
4 |
5 | """Use internally implemented functions as backend."""
6 |
7 | from __future__ import absolute_import
8 | import scipy as sp
9 |
10 | from . import backend
11 | from . import datatools, pca, csp
12 | from .var import VAR
13 | from .external.infomax_ import infomax
14 |
15 |
16 | def generate():
17 | def wrapper_infomax(data, random_state=None):
18 | """Call Infomax (adapted from MNE) for ICA calculation."""
19 | u = infomax(datatools.cat_trials(data).T, extended=True,
20 | random_state=random_state).T
21 | m = sp.linalg.pinv(u)
22 | return m, u
23 |
24 | def wrapper_pca(x, reducedim):
25 | """Call SCoT's PCA algorithm."""
26 | c, d = pca.pca(datatools.cat_trials(x),
27 | subtract_mean=False, reducedim=reducedim)
28 | y = datatools.dot_special(c.T, x)
29 | return c, d, y
30 |
31 | def wrapper_csp(x, cl, reducedim):
32 | """Call SCoT's CSP algorithm."""
33 | c, d = csp.csp(x, cl, numcomp=reducedim)
34 | y = datatools.dot_special(c.T, x)
35 | return c, d, y
36 |
37 | return {'ica': wrapper_infomax, 'pca': wrapper_pca, 'csp': wrapper_csp,
38 | 'var': VAR}
39 |
40 |
41 | backend.register('builtin', generate)
42 |
--------------------------------------------------------------------------------
/scot/backend_mne.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2016 SCoT Development Team
4 |
5 | """Use mne-python routines as backend."""
6 |
7 | from __future__ import absolute_import
8 | import scipy as sp
9 |
10 | from . import datatools
11 | from . import backend
12 | from . import backend_builtin as builtin
13 |
14 | try:
15 | import mne
16 | except ImportError:
17 | mne = None
18 |
19 |
20 | def generate():
21 | from mne.preprocessing.infomax_ import infomax
22 |
23 | def wrapper_infomax(data, random_state=None):
24 | """Call Infomax for ICA calculation."""
25 | u = infomax(datatools.cat_trials(data).T, extended=True,
26 | random_state=random_state).T
27 | m = sp.linalg.pinv(u)
28 | return m, u
29 |
30 | def wrapper_csp(x, cl, reducedim):
31 | """Call MNE CSP algorithm."""
32 | from mne.decoding import CSP
33 | csp = CSP(n_components=reducedim, cov_est="epoch", reg="ledoit_wolf")
34 | csp.fit(x, cl)
35 | c, d = csp.filters_.T[:, :reducedim], csp.patterns_[:reducedim, :]
36 | y = datatools.dot_special(c.T, x)
37 | return c, d, y
38 |
39 | backend = builtin.generate()
40 | backend.update({'ica': wrapper_infomax, 'csp': wrapper_csp})
41 | return backend
42 |
43 |
44 | if mne is not None:
45 | backend.register('mne', generate)
46 |
--------------------------------------------------------------------------------
/scot/backend_sklearn.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2016 SCoT Development Team
4 |
5 | """Use scikit-learn routines as backend."""
6 |
7 | from __future__ import absolute_import
8 | import numpy as np
9 |
10 | from .datatools import atleast_3d, cat_trials, dot_special
11 | from . import backend
12 | from . import backend_builtin as builtin
13 | from .varbase import VARBase
14 |
15 |
16 | def generate():
17 | from sklearn.decomposition import FastICA
18 | from sklearn.decomposition import PCA
19 |
20 | def wrapper_fastica(data, random_state=None):
21 | """Call FastICA implementation from scikit-learn."""
22 | ica = FastICA(random_state=random_state, whiten="arbitrary-variance")
23 | ica.fit(cat_trials(data).T)
24 | u = ica.components_.T
25 | m = ica.mixing_.T
26 | return m, u
27 |
28 | def wrapper_pca(x, reducedim):
29 | """Call PCA implementation from scikit-learn."""
30 | pca = PCA(n_components=reducedim)
31 | pca.fit(cat_trials(x).T)
32 | d = pca.components_
33 | c = pca.components_.T
34 | y = dot_special(c.T, x)
35 | return c, d, y
36 |
37 | class VAR(VARBase):
38 | """Scikit-learn based implementation of VAR class.
39 |
40 | This class fits VAR models using various implementations of generalized
41 | linear model fitting available in scikit-learn.
42 |
43 | Parameters
44 | ----------
45 | model_order : int
46 | Autoregressive model order.
47 | fitobj : class, optional
48 | Instance of a linear model implementation.
49 | n_jobs : int | None
50 | Number of jobs to run in parallel for various tasks (e.g. whiteness
51 | testing). If set to None, joblib is not used at all. Note that the
52 | main script must be guarded with `if __name__ == '__main__':` when
53 | using parallelization.
54 | verbose : bool
55 |             Whether to print information to stdout.
56 | Default: None - use verbosity from global configuration.
57 | """
58 | def __init__(self, model_order, fitobj=None, n_jobs=1, verbose=None):
59 | VARBase.__init__(self, model_order=model_order, n_jobs=n_jobs,
60 | verbose=verbose)
61 | if fitobj is None:
62 | from sklearn.linear_model import LinearRegression
63 | fitobj = LinearRegression(fit_intercept=False)
64 | self.fitting_model = fitobj
65 |
66 | def fit(self, data):
67 | """Fit VAR model to data.
68 |
69 | Parameters
70 | ----------
71 | data : array, shape (trials, channels, samples)
72 | Continuous or segmented data set. If the data is continuous, a
73 | 2D array of shape (channels, samples) can be provided.
74 |
75 | Returns
76 | -------
77 | self : :class:`VAR`
78 | The :class:`VAR` object.
79 | """
80 | data = atleast_3d(data)
81 | (x, y) = self._construct_eqns(data)
82 | self.fitting_model.fit(x, y)
83 |
84 | self.coef = self.fitting_model.coef_
85 |
86 | self.residuals = data - self.predict(data)
87 | self.rescov = np.cov(cat_trials(self.residuals[:, :, self.p:]))
88 |
89 | return self
90 |
91 | backend = builtin.generate()
92 | backend.update({'ica': wrapper_fastica, 'pca': wrapper_pca, 'var': VAR})
93 | return backend
94 |
95 |
96 | backend.register('sklearn', generate)
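
# A minimal usage sketch (assumes scikit-learn is installed; see
# examples/misc/features.py for a complete script):
#
#     import scot
#     scot.backend.activate('sklearn')
#     var = scot.backend['var'](model_order=30)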
97 |
--------------------------------------------------------------------------------
/scot/backendmanager.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2016 SCoT Development Team
4 |
5 | from . import config
6 |
7 |
8 | class BackendManager:
9 | def __init__(self):
10 | self.backends = {}
11 | self.current = None
12 |
13 | def register(self, name, activation_function):
14 | if config.getboolean('scot', 'verbose'):
15 | print('Registering backend:', name)
16 | self.backends[name] = activation_function
17 |
18 | def activate(self, name):
19 | if config.getboolean('scot', 'verbose'):
20 | print('Activating backend:', name)
21 | self.current = self.backends[name]()
22 |
23 | def names(self):
24 | return self.backends.keys()
25 |
26 | def items(self):
27 | return self.backends.items()
28 |
29 | def get_backend(self, name):
30 | return self.backends[name]()
31 |
32 | def __getitem__(self, item):
33 | return self.current[item]
34 |
35 | def __call__(self, name):
36 | self.activate(name)
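
# A minimal usage sketch ('generate' stands for any backend factory function,
# such as those registered in backend_builtin.py):
#
#     backend = BackendManager()
#     backend.register('builtin', generate)  # generate() returns a dict of algorithms
#     backend.activate('builtin')            # equivalent shorthand: backend('builtin')
#     var_class = backend['var']             # look up 'var' in the active backend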
37 |
--------------------------------------------------------------------------------
/scot/config.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2016 SCoT Development Team
4 |
5 | import os
6 |
7 | try:
8 | from configparser import ConfigParser
9 | except ImportError:
10 | from ConfigParser import ConfigParser
11 |
12 |
13 | def load_configuration():
14 | scotdir = os.path.abspath(os.path.dirname(__file__))
15 | config_files = [os.path.join(scotdir, 'scot.ini'),
16 | '/etc/eegtopo.ini',
17 | '/etc/scot.ini',
18 | os.path.expanduser("~/.eegtopo.ini"),
19 | os.path.expanduser("~/.scot.ini")]
20 | config = ConfigParser()
21 | files = config.read(config_files)
22 | if not files:
23 | raise ValueError('Could not parse configuration.')
24 | return config
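
# A minimal usage sketch (mirrors how scot/__init__.py uses it):
#
#     config = load_configuration()
#     backend_name = config.get('scot', 'backend')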
25 |
--------------------------------------------------------------------------------
/scot/csp.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2016 SCoT Development Team
4 |
5 | """Common spatial patterns (CSP) implementation."""
6 |
7 | import numpy as np
8 | from scipy.linalg import eigh
9 |
10 |
11 | def csp(x, cl, numcomp=None):
12 | """Calculate common spatial patterns (CSP).
13 |
14 | Parameters
15 | ----------
16 | x : array, shape (trials, channels, samples) or (channels, samples)
17 | EEG data set.
18 | cl : list of valid dict keys
19 | Class labels associated with each trial. Currently, only two classes
20 | are supported.
21 | numcomp : int, optional
22 | Number of patterns to keep after applying CSP. If `numcomp` is greater
23 | than channels or None, all patterns are returned.
24 |
25 | Returns
26 | -------
27 | w : array, shape (channels, components)
28 | CSP weight matrix.
29 | v : array, shape (components, channels)
30 | CSP projection matrix.
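
    Examples
    --------
    A minimal sketch with random data for two classes:

    >>> x = np.random.randn(10, 5, 100)  # 10 trials, 5 channels, 100 samples
    >>> cl = [0, 1] * 5                  # one class label per trial
    >>> w, v = csp(x, cl, numcomp=2)
    >>> w.shape
    (5, 2)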
31 | """
32 |
33 | x = np.asarray(x)
34 | cl = np.asarray(cl).ravel()
35 |
36 | if x.ndim != 3 or x.shape[0] < 2:
37 | raise AttributeError('CSP requires at least two trials.')
38 |
39 | t, m, n = x.shape
40 |
41 | if t != cl.size:
42 |         raise AttributeError('Number of class labels ({}) must equal the '
43 |                              'number of trials, i.e. the first dimension '
44 |                              'of x ({}).'.format(cl.size, t))
45 |
46 | labels = np.unique(cl)
47 |
48 | if labels.size != 2:
49 | raise AttributeError('CSP is currently implemented for two classes '
50 | 'only (got {}).'.format(labels.size))
51 |
52 | x1 = x[cl == labels[0], :, :]
53 | x2 = x[cl == labels[1], :, :]
54 |
55 | sigma1 = np.zeros((m, m))
56 | for t in range(x1.shape[0]):
57 | sigma1 += np.cov(x1[t, :, :]) / x1.shape[0]
58 | sigma1 /= sigma1.trace()
59 |
60 | sigma2 = np.zeros((m, m))
61 | for t in range(x2.shape[0]):
62 | sigma2 += np.cov(x2[t, :, :]) / x2.shape[0]
63 | sigma2 /= sigma2.trace()
64 |
65 | e, w = eigh(sigma1, sigma1 + sigma2, overwrite_a=True, overwrite_b=True,
66 | check_finite=False)
67 |
68 | order = np.argsort(e)[::-1]
69 | w = w[:, order]
70 | v = np.linalg.inv(w)
71 |
72 | # subsequently remove unwanted components from the middle of w and v
73 | if numcomp is None:
74 | numcomp = w.shape[1]
75 | while w.shape[1] > numcomp:
76 | i = int(np.floor(w.shape[1]/2))
77 | w = np.delete(w, i, 1)
78 | v = np.delete(v, i, 0)
79 |
80 | return w, v
81 |
--------------------------------------------------------------------------------
/scot/datasets.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2016 SCoT Development Team
4 |
5 | from os import makedirs
6 | from os.path import expanduser, isfile, isdir, join
7 | from requests import get
8 | import numpy as np
9 | import hashlib
10 |
11 | from .matfiles import loadmat
12 | from .eegtopo.eegpos3d import positions
13 | from . import config
14 |
15 |
16 | datadir = expanduser(config.get("scot", "data"))
17 | datasets = {"mi": {"files": ["motorimagery.mat"], "md5": ["239a20a672f9f312e9d762daf3adf214"],
18 | "url": "https://github.com/scot-dev/scot-data/raw/main/scotdata/"}}
19 |
20 |
21 | def fetch(dataset="mi", datadir=datadir):
22 | """Fetch example dataset.
23 |
24 | If the requested dataset is not found in the location specified by
25 | `datadir`, the function attempts to download it.
26 |
27 | Parameters
28 | ----------
29 | dataset : str
30 | Which dataset to load. Currently only 'mi' is supported.
31 | datadir : str
32 | Path to the storage location of example datasets. Datasets are
33 | downloaded to this location if they cannot be found. If the directory
34 | does not exist it is created.
35 |
36 | Returns
37 | -------
38 | data : list of dicts
39 | The data set is stored in a list, where each list element
40 | corresponds to data from one subject. Each list element is a
41 | dictionary with the following keys:
42 | - "eeg" ... EEG signals
43 | - "triggers" ... Trigger latencies
44 | - "labels" ... Class labels
45 | - "fs" ... Sample rate
46 | - "locations" ... Channel locations
47 | """
48 | if dataset not in datasets:
49 | raise ValueError("Example data '{}' not available.".format(dataset))
50 | else:
51 | files = datasets[dataset]["files"]
52 | url = datasets[dataset]["url"]
53 | md5 = datasets[dataset]["md5"]
54 | if not isdir(datadir):
55 | makedirs(datadir)
56 |
57 | data = []
58 |
59 | for n, filename in enumerate(files):
60 | fullfile = join(datadir, filename)
61 | if not isfile(fullfile):
62 | with open(fullfile, "wb") as f:
63 |                 response = get(url + filename)  # avoid os.path.join for URLs
64 | f.write(response.content)
65 | with open(fullfile, "rb") as f: # check if MD5 of downloaded file matches original hash
66 |             digest = hashlib.md5(f.read()).hexdigest()  # avoid shadowing builtin 'hash'
67 |             if digest != md5[n]:
68 |                 raise MD5MismatchError("MD5 hash of {} ({}) does not match {}.".format(fullfile, digest, md5[n]))
69 | data.append(convert(dataset, loadmat(fullfile)))
70 |
71 | return data
72 |
73 |
74 | def convert(dataset, mat):
75 | if dataset == "mi":
76 | mat = mat["s0"]
77 |
78 | data = {}
79 | data["fs"] = mat["fs"]
80 | data["triggers"] = np.asarray(mat["tr"], dtype=int)
81 | data["eeg"] = mat["eeg"].T
82 |
83 | cltrans = {1: "hand", -1: "foot"}
84 | data["labels"] = np.array([cltrans[c] for c in mat["cl"]])
85 |
86 | # Set EEG channel labels manually
87 | labels = ["AF7", "AFz", "AF8", "F3", "F1", "Fz", "F2", "F4", "FT7", "FC5",
88 | "FC3", "FC1", "FCz", "FC2", "FC4", "FC6", "FT8", "C5", "C3",
89 | "C1", "Cz", "C2", "C4", "C6", "CP5", "CP3", "CP1", "CPz", "CP2",
90 | "CP4", "CP6", "P7", "P3", "Pz", "P4", "P8", "PO3", "POz", "PO4",
91 | "O1", "Oz", "O2", "O9", "Iz", "O10"]
92 | data["locations"] = [[v for v in positions[l].vector] for l in labels]
93 | return data
94 |
95 |
96 | class MD5MismatchError(Exception):
97 | pass
98 |
--------------------------------------------------------------------------------
/scot/datatools.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2015 SCoT Development Team
4 |
5 | """
6 | Summary
7 | -------
8 | Tools for basic data manipulation.
9 | """
10 |
11 | from __future__ import division
12 |
13 | import numpy as np
14 |
15 | from .utils import check_random_state
16 |
17 |
18 | def atleast_3d(x):
19 | x = np.asarray(x)
20 | if x.ndim >= 3:
21 | return x
22 | elif x.ndim == 2:
23 | return x[np.newaxis, ...]
24 | else:
25 | return x[np.newaxis, np.newaxis, :]
26 |
27 |
28 | def cut_segments(x2d, tr, start, stop):
29 | """Cut continuous signal into segments.
30 |
31 | Parameters
32 | ----------
33 | x2d : array, shape (m, n)
34 | Input data with m signals and n samples.
35 | tr : list of int
36 | Trigger positions.
37 | start : int
38 | Window start (offset relative to trigger).
39 | stop : int
40 | Window end (offset relative to trigger).
41 |
42 | Returns
43 | -------
44 | x3d : array, shape (len(tr), m, stop-start)
45 | Segments cut from data. Individual segments are stacked along the first
46 | dimension.
47 |
48 | See also
49 | --------
50 | cat_trials : Concatenate segments.
51 |
52 | Examples
53 | --------
54 | >>> data = np.random.randn(5, 1000) # 5 channels, 1000 samples
55 | >>> tr = [750, 500, 250] # three segments
56 | >>> x3d = cut_segments(data, tr, 50, 100) # each segment is 50 samples
57 | >>> x3d.shape
58 | (3, 5, 50)
59 | """
60 | if start != int(start):
61 | raise ValueError("start index must be an integer")
62 | if stop != int(stop):
63 | raise ValueError("stop index must be an integer")
64 |
65 | x2d = np.atleast_2d(x2d)
66 | tr = np.asarray(tr, dtype=int).ravel()
67 | win = np.arange(start, stop, dtype=int)
68 | return np.concatenate([x2d[np.newaxis, :, t + win] for t in tr])
69 |
70 |
71 | def cat_trials(x3d):
72 | """Concatenate trials along time axis.
73 |
74 | Parameters
75 | ----------
76 | x3d : array, shape (t, m, n)
77 | Segmented input data with t trials, m signals, and n samples.
78 |
79 | Returns
80 | -------
81 | x2d : array, shape (m, t * n)
82 | Trials are concatenated along the second axis.
83 |
84 | See also
85 | --------
86 | cut_segments : Cut segments from continuous data.
87 |
88 | Examples
89 | --------
90 | >>> x = np.random.randn(6, 4, 150)
91 | >>> y = cat_trials(x)
92 | >>> y.shape
93 | (4, 900)
94 | """
95 | x3d = atleast_3d(x3d)
96 | t = x3d.shape[0]
97 | return np.concatenate(np.split(x3d, t, 0), axis=2).squeeze(0)
98 |
99 |
100 | def dot_special(x2d, x3d):
101 | """Segment-wise dot product.
102 |
103 | This function calculates the dot product of x2d with each trial of x3d.
104 |
105 | Parameters
106 | ----------
107 | x2d : array, shape (p, m)
108 | Input argument.
109 | x3d : array, shape (t, m, n)
110 | Segmented input data with t trials, m signals, and n samples. The dot
111 | product with x2d is calculated for each trial.
112 |
113 | Returns
114 | -------
115 | out : array, shape (t, p, n)
116 | Dot product of x2d with each trial of x3d.
117 |
118 | Examples
119 | --------
120 | >>> x = np.random.randn(6, 40, 150)
121 | >>> a = np.ones((7, 40))
122 | >>> y = dot_special(a, x)
123 | >>> y.shape
124 | (6, 7, 150)
125 | """
126 | x3d = atleast_3d(x3d)
127 | x2d = np.atleast_2d(x2d)
128 | return np.concatenate([x2d.dot(x3d[i, ...])[np.newaxis, ...]
129 | for i in range(x3d.shape[0])])
130 |
131 |
132 | def randomize_phase(data, random_state=None):
133 | """Phase randomization.
134 |
135 | This function randomizes the spectral phase of the input data along the
136 | last dimension.
137 |
138 | Parameters
139 | ----------
140 | data : array
141 | Input array.
142 |
143 | Returns
144 | -------
145 | out : array
146 | Array of same shape as data.
147 |
148 | Notes
149 | -----
150 | The algorithm randomizes the phase component of the input's complex Fourier
151 | transform.
152 |
153 | Examples
154 | --------
155 | .. plot::
156 | :include-source:
157 |
158 | from pylab import *
159 | from scot.datatools import randomize_phase
160 | np.random.seed(1234)
161 | s = np.sin(np.linspace(0,10*np.pi,1000))
162 | x = np.vstack([s, np.sign(s)])
163 | y = randomize_phase(x)
164 | subplot(2,1,1)
165 | title('Phase randomization of sine wave and rectangular function')
166 | plot(x.T + [1.5, -1.5]), axis([0,1000,-3,3])
167 | subplot(2,1,2)
168 | plot(y.T + [1.5, -1.5]), axis([0,1000,-3,3])
169 |         show()
170 | """
171 | rng = check_random_state(random_state)
172 | data = np.asarray(data)
173 | data_freq = np.fft.rfft(data)
174 | data_freq = np.abs(data_freq) * np.exp(1j*rng.random_sample(data_freq.shape)*2*np.pi)
175 | return np.fft.irfft(data_freq, data.shape[-1])
176 |
177 |
178 | def acm(x, l):
179 | """Compute autocovariance matrix at lag l.
180 |
181 | This function calculates the autocovariance matrix of `x` at lag `l`.
182 |
183 | Parameters
184 | ----------
185 | x : array, shape (n_trials, n_channels, n_samples)
186 | Signal data (2D or 3D for multiple trials)
187 | l : int
188 | Lag
189 |
190 | Returns
191 | -------
192 |     c : ndarray, shape (n_channels, n_channels)
193 | Autocovariance matrix of `x` at lag `l`.
194 | """
195 | x = atleast_3d(x)
196 |
197 | if l > x.shape[2]-1:
198 | raise AttributeError("lag exceeds data length")
199 |
200 | ## subtract mean from each trial
201 | #for t in range(x.shape[2]):
202 | # x[:, :, t] -= np.mean(x[:, :, t], axis=0)
203 |
204 | if l == 0:
205 | a, b = x, x
206 | else:
207 | a = x[:, :, l:]
208 | b = x[:, :, 0:-l]
209 |
210 | c = np.zeros((x.shape[1], x.shape[1]))
211 | for t in range(x.shape[0]):
212 | c += a[t, :, :].dot(b[t, :, :].T) / a.shape[2]
213 | c /= x.shape[0]
214 |
215 | return c.T
216 |
--------------------------------------------------------------------------------
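
Unlike the other functions in this module, acm carries no doctest. A minimal sketch of
its use (white-noise input, so the lag-0 autocovariance is close to the identity):

    import numpy as np
    from scot.datatools import acm

    x = np.random.randn(10, 4, 1000)  # 10 trials, 4 channels, 1000 samples
    c0 = acm(x, 0)  # approximately the 4x4 identity matrix
    c1 = acm(x, 1)  # approximately zero
    print(c0.shape)  # (4, 4)
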
/scot/eegtopo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scot-dev/scot/1e5dd285bbf44d8078e3177ca31ceb01b4aa1d61/scot/eegtopo/__init__.py
--------------------------------------------------------------------------------
/scot/eegtopo/eegpos3d.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013 Martin Billinger
4 |
5 | """Module to generate 3d EEG locations"""
6 |
7 | from numpy import pi
8 |
9 | from . import geo_spherical as geo
10 | from .tools import Struct
11 |
12 | Point = geo.Point
13 | Line = geo.Line
14 | Circle = geo.Circle
15 | construct = geo.Construct
16 |
17 |
18 | def intersection(a, b, expr=lambda c: c.vector.z >= 0):
19 | pts = construct.circle_intersect_circle(a, b)
20 | return [c for c in pts if expr(c)]
21 |
22 |
23 | midpoint = construct.midpoint
24 |
25 |
26 | #noinspection PyPep8Naming
27 | def construct_1020_easycap(variant=0):
28 | p = Struct()
29 |
30 | p.Cz = Point(0, 0, 1)
31 | p.Fpz = Point(0, 1, 0)
32 | p.Oz = Point(0, -1, 0)
33 | p.T7 = Point(-1, 0, 0)
34 | p.T8 = Point(1, 0, 0)
35 |
36 |     #horizontal = Circle(p.Cz, p.Fpz)  # great circle in the horizontal plane
37 |     #sagittal = Circle(p.T7, p.Cz)  # great circle in the sagittal plane
38 |     #coronal = Circle(p.Oz, p.Cz)  # great circle in the coronal plane
39 |
40 |     horizontal = Line(p.Fpz, p.T7)  # great circle in the horizontal plane
41 |     sagittal = Line(p.Fpz, p.Cz)  # great circle in the sagittal plane
42 |     coronal = Line(p.T7, p.Cz)  # great circle in the coronal plane
43 |
44 | p.Fz = sagittal.get_point(0.5)
45 | p.Pz = sagittal.get_point(1.5)
46 | p.C3 = coronal.get_point(0.5)
47 | p.C4 = coronal.get_point(1.5)
48 | p.Fp1 = horizontal.get_point(0.2)
49 | p.Fp2 = horizontal.get_point(-0.2)
50 | p.F7 = horizontal.get_point(0.6)
51 | p.F8 = horizontal.get_point(-0.6)
52 | p.P7 = horizontal.get_point(1.4)
53 | p.P8 = horizontal.get_point(-1.4)
54 | p.O1 = horizontal.get_point(1.8)
55 | p.O2 = horizontal.get_point(-1.8)
56 |
57 | circle_F = Circle(p.F7, p.Fz, p.F8)
58 | circle_P = Circle(p.P7, p.Pz, p.P8)
59 |
60 | if variant == 0:
61 | circle_3 = Circle(p.Fp1, p.C3, p.O1)
62 | circle_4 = Circle(p.Fp2, p.C4, p.O2)
63 | #elif variant == 1:
64 | else:
65 | circle_3 = Circle(p.Fpz, p.C3, p.Oz)
66 | circle_4 = Circle(p.Fpz, p.C4, p.Oz)
67 |
68 | p.F3 = intersection(circle_3, circle_F)[0]
69 | p.F4 = intersection(circle_4, circle_F)[0]
70 | p.P3 = intersection(circle_3, circle_P)[0]
71 | p.P4 = intersection(circle_4, circle_P)[0]
72 |
73 | p.AFz = midpoint(p.Fpz, p.Fz)
74 | p.AF7 = midpoint(p.Fp1, p.F7)
75 | p.AF8 = midpoint(p.Fp2, p.F8)
76 |
77 | circle_AF = Circle(p.AF7, p.AFz, p.AF8)
78 | angle_AF = circle_AF.angle(p.AF7) / 2
79 | p.AF3 = circle_AF.get_point(angle_AF)
80 | p.AF4 = circle_AF.get_point(-angle_AF)
81 |
82 | angle_F2 = circle_F.angle(p.F4) / 2
83 | angle_F6 = circle_F.angle(p.F4) + (circle_F.angle(p.F8) - circle_F.angle(p.F4)) / 2
84 | p.F2 = circle_F.get_point(angle_F2)
85 | p.F1 = circle_F.get_point(-angle_F2)
86 | p.F6 = circle_F.get_point(angle_F6)
87 | p.F5 = circle_F.get_point(-angle_F6)
88 |
89 | p.C1 = midpoint(p.C3, p.Cz)
90 | p.C2 = midpoint(p.C4, p.Cz)
91 | p.C5 = midpoint(p.C3, p.T7)
92 | p.C6 = midpoint(p.C4, p.T8)
93 |
94 | angle_P2 = circle_P.angle(p.P4) / 2
95 | angle_P6 = circle_P.angle(p.P4) + (circle_P.angle(p.P8) - circle_P.angle(p.P4)) / 2
96 | p.P2 = circle_P.get_point(angle_P2)
97 | p.P1 = circle_P.get_point(-angle_P2)
98 | p.P6 = circle_P.get_point(angle_P6)
99 | p.P5 = circle_P.get_point(-angle_P6)
100 |
101 | circle_5 = Circle(p.F5, p.C5, p.P5)
102 | circle_1 = Circle(p.F1, p.C1, p.P1)
103 | circle_2 = Circle(p.F2, p.C2, p.P2)
104 | circle_6 = Circle(p.F6, p.C6, p.P6)
105 |
106 | p.FCz = midpoint(p.Fz, p.Cz)
107 | p.FT7 = midpoint(p.F7, p.T7)
108 | p.FT8 = midpoint(p.F8, p.T8)
109 |
110 | p.CPz = midpoint(p.Cz, p.Pz)
111 | p.TP7 = midpoint(p.T7, p.P7)
112 | p.TP8 = midpoint(p.T8, p.P8)
113 |
114 | circle_FC = Circle(p.FT7, p.FCz, p.FT8)
115 | circle_CP = Circle(p.TP7, p.CPz, p.TP8)
116 |
117 | p.FC5 = intersection(circle_5, circle_FC)[0]
118 | p.FC3 = intersection(circle_3, circle_FC)[0]
119 | p.FC1 = intersection(circle_1, circle_FC)[0]
120 | p.FC2 = intersection(circle_2, circle_FC)[0]
121 | p.FC4 = intersection(circle_4, circle_FC)[0]
122 | p.FC6 = intersection(circle_6, circle_FC)[0]
123 |
124 | p.CP5 = intersection(circle_5, circle_CP)[0]
125 | p.CP3 = intersection(circle_3, circle_CP)[0]
126 | p.CP1 = intersection(circle_1, circle_CP)[0]
127 | p.CP2 = intersection(circle_2, circle_CP)[0]
128 | p.CP4 = intersection(circle_4, circle_CP)[0]
129 | p.CP6 = intersection(circle_6, circle_CP)[0]
130 |
131 | p.POz = midpoint(p.Pz, p.Oz)
132 | p.PO7 = midpoint(p.P7, p.O1)
133 | p.PO8 = midpoint(p.P8, p.O2)
134 |
135 | circle_PO = Circle(p.PO7, p.POz, p.PO8)
136 | angle_PO = circle_PO.angle(p.PO7) / 2
137 | p.PO3 = circle_PO.get_point(-angle_PO)
138 | p.PO4 = circle_PO.get_point(angle_PO)
139 |
140 | # below the equator
141 |
142 | p.Iz = sagittal.get_point(2.25)
143 | p.T9 = coronal.get_point(-0.25)
144 | p.T10 = coronal.get_point(2.25)
145 |
146 | circle_9 = Circle(p.T9, p.Iz, p.T10)
147 |
148 | p.O9 = circle_9.get_point(-pi / 2 * 0.2)
149 | p.O10 = circle_9.get_point(pi / 2 * 0.2)
150 |
151 | p.PO9 = circle_9.get_point(-pi / 2 * 0.4)
152 | p.PO10 = circle_9.get_point(pi / 2 * 0.4)
153 |
154 | p.P9 = circle_9.get_point(-pi / 2 * 0.6)
155 | p.P10 = circle_9.get_point(pi / 2 * 0.6)
156 |
157 | p.TP9 = circle_9.get_point(-pi / 2 * 0.8)
158 | p.TP10 = circle_9.get_point(pi / 2 * 0.8)
159 |
160 | p.FT9 = circle_9.get_point(-pi / 2 * 1.2)
161 | p.FT10 = circle_9.get_point(pi / 2 * 1.2)
162 |
163 | p.F9 = circle_9.get_point(-pi / 2 * 1.4)
164 | p.F10 = circle_9.get_point(pi / 2 * 1.4)
165 |
166 | return p
167 |
168 |
169 | positions = construct_1020_easycap()
170 |
--------------------------------------------------------------------------------
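
The module-level positions object is consumed elsewhere (e.g. in scot/datasets.py). A
minimal sketch of how to read it:

    from scot.eegtopo.eegpos3d import positions

    # Each entry is a geo_spherical.Point; .list gives [x, y, z] on the unit sphere.
    print(positions['Cz'].list)  # approximately [0.0, 0.0, 1.0]
    print(positions.Fpz.list)    # attribute access works as well (Struct)
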
/scot/eegtopo/geo_euclidean.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013 Martin Billinger
4 |
5 | """Euclidean geometry support module"""
6 |
7 | from __future__ import division
8 |
9 | import math
10 |
11 |
12 | class Vector(object):
13 | """3D-Vector class"""
14 |
15 | def __init__(self, x=0.0, y=0.0, z=0.0):
16 | """Initialize from three numbers"""
17 | self.x, self.y, self.z = float(x), float(y), float(z)
18 |
19 | @classmethod
20 | def fromiterable(cls, itr):
21 | """Initialize from iterable"""
22 | x, y, z = itr
23 | return cls(x, y, z)
24 |
25 | @classmethod
26 | def fromvector(cls, v):
27 | """Copy another vector"""
28 | return cls(v.x, v.y, v.z)
29 |
30 | def __getitem__(self, index):
31 | if index == 0:
32 | return self.x
33 | if index == 1:
34 | return self.y
35 | if index == 2:
36 | return self.z
37 |
38 | def __setitem__(self, index, value):
39 | if index == 0:
40 | self.x = value
41 | if index == 1:
42 | self.y = value
43 | if index == 2:
44 | self.z = value
45 |
46 | def __iter__(self):
47 | yield self.x
48 | yield self.y
49 | yield self.z
50 |
51 | def copy(self):
52 | """return a copy of this vector"""
53 | return Vector(self.x, self.y, self.z)
54 |
55 | def __repr__(self):
56 | return ''.join((self.__class__.__name__, '(', str(self.x), ', ', str(self.y), ', ', str(self.z), ')'))
57 |
58 | def __eq__(self, other):
59 | return self.x == other[0] and self.y == other[1] and self.z == other[2]
60 |
61 | def close(self, other, epsilon=1e-10):
62 | return all([abs(v) <= epsilon for v in self-other])
63 |
64 | def __add__(self, other):
65 | if isinstance(other, Vector):
66 | return Vector(self.x + other.x, self.y + other.y, self.z + other.z)
67 | else:
68 | return Vector(self.x + other, self.y + other, self.z + other)
69 |
70 | def __sub__(self, other):
71 | if isinstance(other, Vector):
72 | return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
73 | else:
74 | return Vector(self.x - other, self.y - other, self.z - other)
75 |
76 | def __mul__(self, other):
77 | if isinstance(other, Vector):
78 | return Vector(self.x * other.x, self.y * other.y, self.z * other.z)
79 | else:
80 | return Vector(self.x * other, self.y * other, self.z * other)
81 |
82 | def __truediv__(self, other):
83 | if isinstance(other, Vector):
84 | return Vector(self.x / other.x, self.y / other.y, self.z / other.z)
85 | else:
86 | return Vector(self.x / other, self.y / other, self.z / other)
87 |
88 | def __iadd__(self, other):
89 | if isinstance(other, Vector):
90 | self.x, self.y, self.z = self.x + other.x, self.y + other.y, self.z + other.z
91 | else:
92 | self.x, self.y, self.z = self.x + other, self.y + other, self.z + other
93 | return self
94 |
95 | def __isub__(self, other):
96 | if isinstance(other, Vector):
97 | self.x, self.y, self.z = self.x - other.x, self.y - other.y, self.z - other.z
98 | else:
99 | self.x, self.y, self.z = self.x - other, self.y - other, self.z - other
100 | return self
101 |
102 | def __imul__(self, other):
103 | if isinstance(other, Vector):
104 | self.x, self.y, self.z = self.x * other.x, self.y * other.y, self.z * other.z
105 | else:
106 | self.x, self.y, self.z = self.x * other, self.y * other, self.z * other
107 | return self
108 |
109 | def __itruediv__(self, other):
110 | if isinstance(other, Vector):
111 | self.x, self.y, self.z = self.x / other.x, self.y / other.y, self.z / other.z
112 | else:
113 | self.x, self.y, self.z = self.x / other, self.y / other, self.z / other
114 | return self
115 |
116 | def dot(self, other):
117 | """Dot product with another vector"""
118 | return self.x * other.x + self.y * other.y + self.z * other.z
119 |
120 | def cross(self, other):
121 | """Cross product with another vector"""
122 | x = self.y * other.z - self.z * other.y
123 | y = self.z * other.x - self.x * other.z
124 | z = self.x * other.y - self.y * other.x
125 | return Vector(x, y, z)
126 |
127 | def norm2(self):
128 | """Squared norm of the vector"""
129 | return self.x * self.x + self.y * self.y + self.z * self.z
130 |
131 | def norm(self):
132 | """Length of the vector"""
133 | return math.sqrt(self.norm2())
134 |
135 | def normalize(self):
136 | """Normalize vector to length 1"""
137 | #noinspection PyMethodFirstArgAssignment
138 | self /= self.norm()
139 | return self
140 |
141 | def normalized(self):
142 | """Return normalized vector, but don't change original"""
143 | return self / self.norm()
144 |
145 | def rotate(self, l, u):
146 | """rotate l radians around axis u"""
147 | cl = math.cos(l)
148 | sl = math.sin(l)
149 | x = (cl + u.x * u.x * (1 - cl)) * self.x + (u.x * u.y * (1 - cl) - u.z * sl) * self.y + (
150 | u.x * u.z * (1 - cl) + u.y * sl) * self.z
151 | y = (u.y * u.x * (1 - cl) + u.z * sl) * self.x + (cl + u.y * u.y * (1 - cl)) * self.y + (
152 | u.y * u.z * (1 - cl) - u.x * sl) * self.z
153 | z = (u.z * u.x * (1 - cl) - u.y * sl) * self.x + (u.z * u.y * (1 - cl) + u.x * sl) * self.y + (
154 | cl + u.z * u.z * (1 - cl)) * self.z
155 | self.x, self.y, self.z = x, y, z
156 | return self
157 |
158 | def rotated(self, l, u):
159 | """rotate l radians around axis, but don't change original"""
160 | return self.copy().rotate(l, u)
161 |
162 |
--------------------------------------------------------------------------------
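
Vector.rotate implements rotation about an arbitrary unit axis (Rodrigues' rotation
formula). A minimal sketch:

    import math
    from scot.eegtopo.geo_euclidean import Vector

    v = Vector(1, 0, 0)
    w = v.rotated(math.pi / 2, Vector(0, 0, 1))  # 90 degrees around the z-axis
    print(w.close(Vector(0, 1, 0)))  # True, up to floating-point error
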
/scot/eegtopo/geo_spherical.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013 Martin Billinger
4 |
5 | """Spherical geometry support module"""
6 |
7 | from __future__ import division
8 |
9 | import math
10 |
11 | from .geo_euclidean import Vector
12 |
13 |
14 | ################################################################################
15 |
16 | eps = 1e-15
17 |
18 | ################################################################################
19 |
20 | class Point(object):
21 | """Point on the surface of a sphere"""
22 |
23 | def __init__(self, x=None, y=None, z=None):
24 | if x is None and y is None and z is None:
25 | self._pos3d = Vector(0, 0, 1)
26 | elif x is not None and y is not None and z is None:
27 | self._pos3d = Vector(x, y, math.sqrt(1 - x ** 2 - y ** 2))
28 | elif x is not None and y is not None and z is not None:
29 | self._pos3d = Vector(x, y, z).normalized()
30 | else:
31 | raise RuntimeError('invalid parameters')
32 |
33 | @classmethod
34 | def fromvector(cls, v):
35 | """Initialize from euclidean vector"""
36 | w = v.normalized()
37 | return cls(w.x, w.y, w.z)
38 |
39 | @property
40 | def vector(self):
41 | """position in 3d space"""
42 | return self._pos3d
43 |
44 | @property
45 | def list(self):
46 | """position in 3d space"""
47 | return [self._pos3d.x, self._pos3d.y, self._pos3d.z]
48 |
49 | @vector.setter
50 | def vector(self, v):
51 | self._pos3d.x = v.x
52 | self._pos3d.y = v.y
53 | self._pos3d.z = v.z
54 |
55 | def __repr__(self):
56 | return ''.join(
57 | (self.__class__.__name__, '(', str(self._pos3d.x), ', ', str(self._pos3d.y), ', ', str(self._pos3d.z), ')'))
58 |
59 | def distance(self, other):
60 | """Distance to another point on the sphere"""
61 | return math.acos(self._pos3d.dot(other.vector))
62 |
63 | def distances(self, points):
64 | """Distance to other points on the sphere"""
65 | return [math.acos(self._pos3d.dot(p.vector)) for p in points]
66 |
67 | ################################################################################
68 |
69 | class Line(object):
70 |     """Line on the spherical surface (also known as a great circle)"""
71 |
72 | def __init__(self, a, b):
73 | self.a = Point.fromvector(a.vector)
74 | self.b = Point.fromvector(b.vector)
75 |
76 | def get_point(self, l):
77 | d = self.a.distance(self.b)
78 | n = self.a.vector.cross(self.b.vector)
79 | p = Point.fromvector(self.a.vector)
80 | p.vector.rotate(l * d, n)
81 | return p
82 |
83 | def distance(self, p):
84 | n = Point.fromvector(self.a.vector.cross(self.b.vector))
85 | return abs(math.pi / 2 - n.distance(p))
86 |
87 | ################################################################################
88 |
89 | class Circle(object):
90 | """Arbitrary circle on the spherical surface"""
91 |
92 | def __init__(self, a, b, c=None):
93 | if c is None:
94 | self.c = Point.fromvector(a.vector) # Center
95 | self.x = Point.fromvector(b.vector) # a point on the circle
96 | else:
97 | self.c = Point.fromvector((b.vector - a.vector).cross(c.vector - b.vector).normalized()) # Center
98 | self.x = Point.fromvector(b.vector) # a point on the circle
99 |
100 | def get_point(self, l):
101 | return Point.fromvector(self.x.vector.rotated(l, self.c.vector))
102 |
103 | def get_radius(self):
104 | return self.c.distance(self.x)
105 |
106 | def angle(self, p):
107 |
108 | c = self.c.vector * self.x.vector.dot(self.c.vector) # center in circle plane
109 |
110 | a = (self.x.vector - c).normalized()
111 | b = (p.vector - c).normalized()
112 | return math.acos(a.dot(b))
113 |
114 | def distance(self, p):
115 | return abs(self.c.distance(p) - self.c.distance(self.x))
116 |
117 |
118 | ################################################################################
119 |
120 | class Construct(object):
121 | """Collection of methods for geometric construction on a sphere"""
122 |
123 | @staticmethod
124 | def midpoint(a, b):
125 | """Point exactly between a and b"""
126 | return Point.fromvector((a.vector + b.vector) / 2)
127 |
128 | @staticmethod
129 | def line_intersect_line(k, l):
130 | c1 = k.a.vector.cross(k.b.vector)
131 | c2 = l.a.vector.cross(l.b.vector)
132 | p = c1.cross(c2)
133 | return Point.fromvector(p), Point.fromvector(p * -1)
134 |
135 | @staticmethod
136 | def line_intersect_circle(line, circle):
137 | cross_line = line.a.vector.cross(line.b.vector)
138 | cross_lc = cross_line.cross(circle.c.vector)
139 | dot_circle = circle.c.vector.dot(circle.x.vector)
140 | if abs(cross_lc.z) > eps:
141 | a = cross_lc.dot(cross_lc)
142 | b = 2 * dot_circle * cross_line.cross(cross_lc).z
143 |             c = dot_circle * dot_circle * (cross_line.x ** 2 + cross_line.y ** 2) - cross_lc.z ** 2
144 |             s = math.sqrt(b ** 2 - 4 * a * c)
145 | z1 = (s - b) / (2 * a)
146 | x1 = (cross_lc.x * z1 - cross_line.y * dot_circle) / cross_lc.z
147 | y1 = (cross_lc.y * z1 + cross_line.x * dot_circle) / cross_lc.z
148 | z2 = -(s + b) / (2 * a)
149 | x2 = (cross_lc.x * z2 - cross_line.y * dot_circle) / cross_lc.z
150 | y2 = (cross_lc.y * z2 + cross_line.x * dot_circle) / cross_lc.z
151 | return Point(x1, y1, z1), Point(x2, y2, z2)
152 | else:
153 | return None
154 |
155 | @staticmethod
156 | def circle_intersect_circle(a, b):
157 | ac = a.c.vector
158 | bc = b.c.vector
159 | cross = ac.cross(bc)
160 | dot_a = ac.dot(a.x.vector)
161 | dot_b = bc.dot(b.x.vector)
162 | if abs(cross.z) > eps:
163 | a = cross.dot(cross)
164 | b = 2 * (dot_b * ac.cross(cross).z - dot_a * bc.cross(cross).z)
165 | c = dot_b ** 2 * (ac.x ** 2 + ac.y ** 2) - 2 * dot_a * dot_b * (ac.x * bc.x + ac.y * bc.y) + dot_a ** 2 * (
166 | bc.x ** 2 + bc.y ** 2) - cross.z ** 2
167 | s = math.sqrt(b ** 2 - 4 * a * c)
168 | z1 = (s - b) / (2 * a)
169 | x1 = (bc.y * dot_a - ac.y * dot_b + cross.x * z1) / cross.z
170 | y1 = (ac.x * dot_b - bc.x * dot_a + cross.y * z1) / cross.z
171 | z2 = -(s + b) / (2 * a)
172 | x2 = (bc.y * dot_a - ac.y * dot_b + cross.x * z2) / cross.z
173 | y2 = (ac.x * dot_b - bc.x * dot_a + cross.y * z2) / cross.z
174 | return Point(x1, y1, z1), Point(x2, y2, z2)
175 | else:
176 | return None
177 |
178 | ################################################################################
179 |
--------------------------------------------------------------------------------
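
A minimal sketch of the Point/Line primitives (distances are great-circle arc lengths
on the unit sphere):

    from scot.eegtopo.geo_spherical import Point, Line

    a = Point(0, 0, 1)  # vertex
    b = Point(1, 0, 0)  # a point on the equator
    print(a.distance(b))             # pi/2
    mid = Line(a, b).get_point(0.5)  # halfway along the arc from a to b
    print(mid.list)                  # approximately [0.707, 0.0, 0.707]
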
/scot/eegtopo/projections.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013 Martin Billinger
4 |
5 | from numpy import arcsin, sin, cos, pi, sqrt, sum
6 | from numpy import atleast_2d, asarray, zeros, newaxis
7 |
8 |
9 | def project_radial_to2d(point_3d):
10 | point_2d = point_3d.copy()
11 | point_2d.z = 0
12 | beta = point_2d.norm()
13 | if beta == 0:
14 | alpha = 0
15 | else:
16 | alpha = arcsin(beta) / beta
17 |
18 | if point_3d.z < 0:
19 | alpha = pi / beta - alpha
20 |
21 | point_2d *= alpha
22 |
23 | return point_2d
24 |
25 |
26 | def project_radial_to3d(point_2d):
27 | alpha = point_2d.norm()
28 | if alpha == 0:
29 | beta = 1
30 | else:
31 | beta = sin(alpha) / alpha
32 | point_3d = point_2d * beta
33 | point_3d.z = cos(alpha)
34 | return point_3d
35 |
36 |
37 | def array_project_radial_to2d(points_3d):
38 | points_3d = atleast_2d(points_3d)
39 | points_2d = points_3d[:, 0:2]
40 |
41 | betas = sqrt(sum(points_2d**2, -1))
42 |
43 | alphas = zeros(betas.shape)
44 |
45 | mask = betas != 0
46 | alphas[mask] = arcsin(betas[mask]) / betas[mask]
47 |
48 | mask = points_3d[:, 2] < 0
49 | alphas[mask] = pi / betas[mask] - alphas[mask]
50 |
51 | return points_2d * alphas[:, newaxis]
52 |
53 |
54 | def array_project_radial_to3d(points_2d):
55 | points_2d = atleast_2d(points_2d)
56 |
57 | alphas = sqrt(sum(points_2d**2, -1))
58 |
59 | betas = sin(alphas) / alphas
60 | betas[alphas == 0] = 1
61 |
62 | x = points_2d[..., 0] * betas
63 | y = points_2d[..., 1] * betas
64 | z = cos(alphas)
65 |
66 | points_3d = asarray([x, y, z]).T
67 |
68 | return points_3d
69 |
--------------------------------------------------------------------------------
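
A round-trip sketch of the radial (azimuthal) projection; points on the unit sphere
project to the plane and back without loss:

    import numpy as np
    from scot.eegtopo.projections import (array_project_radial_to2d,
                                          array_project_radial_to3d)

    points_3d = np.array([[0.6, 0.0, 0.8], [1.0, 0.0, 0.0]])  # on the unit sphere
    p2 = array_project_radial_to2d(points_3d)
    p3 = array_project_radial_to3d(p2)
    print(np.allclose(points_3d, p3))  # True
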
/scot/eegtopo/tools.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013 Martin Billinger
4 |
5 | class Struct(object):
6 | def __init__(self, content=None):
7 |         if isinstance(content, dict):
8 | self.__dict__ = content
9 |
10 | def __getitem__(self, index):
11 | return self.__dict__[index]
12 |
13 | def __setitem__(self, index, value):
14 | self.__dict__[index] = value
15 |
16 | def __iter__(self):
17 | for i in self.__dict__:
18 | yield self[i]
19 |
20 | def keys(self):
21 | return self.__dict__.keys()
22 |
23 | def __len__(self):
24 | return len(self.__dict__)
25 |
26 | def __str__(self):
27 | longest = max([len(i) for i in self.__dict__])
28 | return '\n'.join(['%s : ' % i.rjust(longest) + str(self[i]) for i in self.__dict__])
29 |
30 | def __repr__(self):
31 | return 'Struct( %s )' % str(self.__dict__)
32 |
33 |
--------------------------------------------------------------------------------
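
Struct is a small attribute/item-access hybrid used for the electrode positions. A
minimal sketch:

    from scot.eegtopo.tools import Struct

    s = Struct({'a': 1})
    s['b'] = 2                  # item assignment writes to the attribute dict
    print(s.a, s['b'], len(s))  # 1 2 2
    print(list(s.keys()))       # ['a', 'b']
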
/scot/eegtopo/topoplot.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013 Martin Billinger
4 |
5 | from __future__ import division
6 |
7 | import numpy as np
8 | from scipy.interpolate import interp1d
9 | from .projections import (array_project_radial_to3d,
10 | array_project_radial_to2d)
11 | from .geo_euclidean import Vector
12 | from scipy.spatial import ConvexHull
13 |
14 |
15 | class Topoplot(object):
16 | """ Creates 2D scalp maps. """
17 |
18 | def __init__(self, m=4, num_lterms=10, headcolor=[0, 0, 0, 1], clipping='head', electrodescale=1, interpolationrange=np.pi * 3 / 4, head_radius=np.pi * 3 / 4):
19 | import matplotlib.path as path
20 | self.interprange = interpolationrange
21 | self.head_radius = head_radius
22 | self.nose_angle = 15
23 | self.nose_length = 0.12
24 |
25 | self.headcolor = headcolor
26 |
27 | self.clipping = clipping
28 | self.electrodescale = np.asarray(electrodescale)
29 |
30 | verts = np.array([
31 | (1, 0),
32 | (1, 0.5535714285714286), (0.5535714285714286, 1), (0, 1),
33 | (-0.5535714285714286, 1), (-1, 0.5535714285714286), (-1, 0),
34 | (-1, -0.5535714285714286), (-0.5535714285714286, -1), (0, -1),
35 | (0.5535714285714286, -1), (1, -0.5535714285714286), (1, 0),
36 | ]) * self.head_radius
37 | codes = [path.Path.MOVETO,
38 | path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,
39 | path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,
40 | path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,
41 | path.Path.CURVE4, path.Path.CURVE4, path.Path.CURVE4,
42 | ]
43 | self.path_head = path.Path(verts, codes)
44 |
45 | x = self.head_radius * np.cos((90.0 - self.nose_angle / 2) * np.pi / 180.0)
46 | y = self.head_radius * np.sin((90.0 - self.nose_angle / 2) * np.pi / 180.0)
47 | verts = np.array([(x, y), (0, self.head_radius * (1 + self.nose_length)), (-x, y)])
48 | codes = [path.Path.MOVETO, path.Path.LINETO, path.Path.LINETO]
49 | self.path_nose = path.Path(verts, codes)
50 |
51 | self.legendre_factors = self.calc_legendre_factors(m, num_lterms)
52 |
53 | self.channel_fence = None
54 | self.locations = None
55 | self.g = None
56 | self.z = None
57 | self.c = None
58 | self.image = None
59 |
60 | self.g_map = {}
61 |
62 | @staticmethod
63 | def calc_legendre_factors(m, num_lterms):
64 | return [0] + [(2 * n + 1) / (n ** m * (n + 1) ** m * 4 * np.pi) for n in range(1, num_lterms + 1)]
65 |
66 | def calc_g(self, x):
67 | return np.polynomial.legendre.legval(x, self.legendre_factors)
68 |
69 | def set_locations(self, locations):
70 | n = len(locations)
71 |
72 | g = np.zeros((1 + n, 1 + n))
73 | g[:, 0] = np.ones(1 + n)
74 | g[-1, :] = np.ones(1 + n)
75 | g[-1, 0] = 0
76 | for i in range(n):
77 | for j in range(n):
78 | g[i, j + 1] = self.calc_g(np.dot(locations[i], locations[j]))
79 |
80 | self.channel_fence = None
81 | self.locations = locations
82 | self.g = g
83 |
84 | def set_values(self, z):
85 | self.z = z
86 | self.c = np.linalg.solve(self.g, np.concatenate((z, [0])))
87 |
88 | def get_map(self):
89 | return self.image
90 |
91 | def set_map(self, img):
92 | self.image = img
93 |
94 | def calc_gmap(self, pixels):
95 |
96 | try:
97 | return self.g_map[pixels]
98 | except KeyError:
99 | pass
100 |
101 | x = np.linspace(-self.interprange, self.interprange, pixels)
102 | y = np.linspace(self.interprange, -self.interprange, pixels)
103 |
104 | xy = np.transpose(np.meshgrid(x, y)) / self.electrodescale
105 |
106 | e = array_project_radial_to3d(xy)
107 |
108 | gmap = self.calc_g(e.dot(np.transpose(self.locations)))
109 | self.g_map[pixels] = gmap
110 | return gmap
111 |
112 | def create_map(self, pixels=32):
113 | gm = self.calc_gmap(pixels)
114 | self.image = gm.dot(self.c[1:]) + self.c[0]
115 |
116 | def plot_map(self, axes=None, crange=None, offset=(0,0)):
117 | if axes is None:
118 | import matplotlib.pyplot as plot
119 | axes = plot.gca()
120 |         if isinstance(crange, str):  # compare the value, not the str type object
121 |             if crange.lower() == 'channels':
122 |                 crange = None
123 |             elif crange.lower() in ['full', 'map']:
124 |                 vru = np.nanmax(np.abs(self.image))
125 |                 crange = (-vru, vru)  # unpacked below
126 |         if crange is None:
127 |             vru = np.nanmax(np.abs(self.z))
128 |             vrl = -vru
129 |         else:
130 |             vrl, vru = crange
131 | head = self.path_head.deepcopy()
132 | head.vertices += offset
133 |
134 | if self.clipping == 'head':
135 | clip_path = (head, axes.transData)
136 | elif self.clipping == 'electrodes':
137 | import matplotlib.path as path
138 | verts = self._get_fence() + offset
139 | codes = [path.Path.LINETO] * (len(verts) - 1)
140 | codes.insert(0, path.Path.MOVETO)
141 | clip_path = (path.Path(verts, codes), axes.transData)
142 | else:
143 |             raise ValueError('unknown clipping mode: {}'.format(self.clipping))
144 |
145 | return axes.imshow(self.image, vmin=vrl, vmax=vru, clip_path=clip_path,
146 | extent=(offset[0]-self.interprange, offset[0]+self.interprange,
147 | offset[1]-self.interprange, offset[1]+self.interprange))
148 |
149 | def plot_locations(self, axes=None, offset=(0,0), fmt='k.', alpha=0.5):
150 | if axes is None:
151 | import matplotlib.pyplot as plot
152 | axes = plot.gca()
153 | p2 = array_project_radial_to2d(self.locations) * self.electrodescale + offset
154 | axes.plot(p2[:, 0], p2[:, 1], fmt, alpha=alpha, markersize=2)
155 |
156 | def plot_head(self, axes=None, offset=(0,0)):
157 | import matplotlib.patches as patches
158 | if axes is None:
159 | import matplotlib.pyplot as plot
160 | axes = plot.gca()
161 | head = self.path_head.deepcopy()
162 | nose = self.path_nose.deepcopy()
163 | head.vertices += offset
164 | nose.vertices += offset
165 | axes.add_patch(patches.PathPatch(head, facecolor='none', edgecolor=self.headcolor))
166 | axes.add_patch(patches.PathPatch(nose, facecolor='none', edgecolor=self.headcolor))
167 |
168 | def plot_circles(self, radius, axes=None, offset=(0,0)):
169 | import matplotlib.pyplot as plot
170 | if axes is None: axes = plot.gca()
171 | mx = np.max(np.abs(self.z))
172 | col = interp1d([-mx, 0, mx], [[0, 1, 1], [0, 1, 0], [1, 1, 0]])
173 | for i in range(len(self.locations)):
174 | p3 = self.locations[i]
175 | p2 = array_project_radial_to2d(p3) * self.electrodescale + offset
176 | circ = plot.Circle((p2[0, 0], p2[0, 1]), radius=radius, color=col(self.z[i]))
177 | axes.add_patch(circ)
178 |
179 | def _get_fence(self):
180 | if self.channel_fence is None:
181 | points = array_project_radial_to2d(self.locations) * self.electrodescale
182 | hull = ConvexHull(points)
183 | self.channel_fence = points[hull.vertices]
184 | return self.channel_fence
185 |
186 |
187 | def topoplot(values, locations, axes=None, offset=(0, 0), plot_locations=True,
188 | plot_head=True, **kwargs):
189 |     """Wrapper function for :class:`Topoplot`.
190 | """
191 | topo = Topoplot(**kwargs)
192 | topo.set_locations(locations)
193 | topo.set_values(values)
194 | topo.create_map()
195 | topo.plot_map(axes=axes, offset=offset)
196 | if plot_locations:
197 | topo.plot_locations(axes=axes, offset=offset)
198 | if plot_head:
199 | topo.plot_head(axes=axes, offset=offset)
200 | return topo
201 |
--------------------------------------------------------------------------------
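
A minimal plotting sketch combining topoplot with the constructed 10-20 positions (the
electrode labels are chosen for illustration):

    import numpy as np
    import matplotlib.pyplot as plt
    from scot.eegtopo.topoplot import topoplot
    from scot.eegtopo.eegpos3d import positions

    labels = ['Fz', 'C3', 'Cz', 'C4', 'Pz']
    locs = [positions[l].list for l in labels]
    values = np.random.randn(len(labels))  # one value per electrode
    topoplot(values, locs)                 # draws into the current axes
    plt.show()
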
/scot/eegtopo/warp_layout.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2015 Martin Billinger
4 |
5 | """
6 | Summary
7 | -------
8 | Provides functions to warp electrode layouts.
9 | """
10 |
11 | import numpy as np
12 | import scipy as sp; import scipy.linalg; import scipy.optimize  # load the submodules used below
13 |
14 |
15 | def warp_locations(locations, y_center=None, return_ellipsoid=False, verbose=False):
16 | """ Warp EEG electrode locations to spherical layout.
17 |
18 | EEG Electrodes are warped to a spherical layout in three steps:
19 | 1. An ellipsoid is least-squares-fitted to the electrode locations.
20 | 2. Electrodes are displaced to the nearest point on the ellipsoid's surface.
21 | 3. The ellipsoid is transformed to a sphere, causing the new locations to lie exactly on a spherical surface
22 | with unit radius.
23 |
24 | This procedure intends to minimize electrode displacement in the original coordinate space. Simply projecting
25 | electrodes on a sphere (e.g. by normalizing the x/y/z coordinates) typically gives much larger displacements.
26 |
27 | Parameters
28 | ----------
29 | locations : array-like, shape = [n_electrodes, 3]
30 |         Each row of `locations` corresponds to the location of an EEG electrode in Cartesian x/y/z coordinates.
31 | y_center : float, optional
32 | Fix the y-coordinate of the ellipsoid's center to this value (optional). This is useful to align the ellipsoid
33 | with the central electrodes.
34 | return_ellipsoid : bool, optional
35 |         If `True`, the center and radii of the ellipsoid are returned as well.
36 |
37 | Returns
38 | -------
39 | newlocs : array-like, shape = [n_electrodes, 3]
40 | Electrode locations on unit sphere.
41 | c : array-like, shape = [3], (only returned if `return_ellipsoid` evaluates to `True`)
42 | Center of the ellipsoid in the original location's coordinate space.
43 | r : array-like, shape = [3], (only returned if `return_ellipsoid` evaluates to `True`)
44 | Radii (x, y, z) of the ellipsoid in the original location's coordinate space.
45 | """
46 | locations = np.asarray(locations)
47 |
48 | if y_center is None:
49 | c, r = _fit_ellipsoid_full(locations)
50 | else:
51 | c, r = _fit_ellipsoid_partial(locations, y_center)
52 |
53 | elliptic_locations = _project_on_ellipsoid(c, r, locations)
54 |
55 | if verbose:
56 | print('Head ellipsoid center:', c)
57 | print('Head ellipsoid radii:', r)
58 | distance = np.sqrt(np.sum((locations - elliptic_locations)**2, axis=1))
59 | print('Minimum electrode displacement:', np.min(distance))
60 | print('Average electrode displacement:', np.mean(distance))
61 | print('Maximum electrode displacement:', np.max(distance))
62 |
63 | spherical_locations = (elliptic_locations - c) / r
64 |
65 | if return_ellipsoid:
66 | return spherical_locations, c, r
67 |
68 | return spherical_locations
69 |
70 |
71 | def _fit_ellipsoid_full(locations):
72 |     """identify all 6 ellipsoid parameters (center, radii)"""
73 | a = np.hstack([locations*2, locations**2])
74 | lsq = sp.linalg.lstsq(a, np.ones(locations.shape[0]))
75 | x = lsq[0]
76 | c = -x[:3] / x[3:]
77 | gam = 1 + np.sum(x[:3]**2 / x[3:])
78 | r = np.sqrt(gam / x[3:])
79 | return c, r
80 |
81 |
82 | def _fit_ellipsoid_partial(locations, cy):
83 | """identify only 5 ellipsoid parameters (y-center determined by e.g. Cz)"""
84 | a = np.vstack([locations[:, 0]**2,
85 | locations[:, 1]**2 - 2 * locations[:, 1] * cy,
86 | locations[:, 2]**2,
87 | locations[:, 0]*2,
88 | locations[:, 2]*2]).T
89 | x = sp.linalg.lstsq(a, np.ones(locations.shape[0]))[0]
90 | c = [-x[3] / x[0], cy, -x[4] / x[2]]
91 | gam = 1 + x[3]**2 / x[0] + x[4]**2 / x[2]
92 | r = np.sqrt([gam / x[0], gam / x[1], gam / x[2]])
93 | return c, r
94 |
95 |
96 | def _project_on_ellipsoid(c, r, locations):
97 | """displace locations to the nearest point on ellipsoid surface"""
98 | p0 = locations - c # original locations
99 |
100 | l2 = 1 / np.sum(p0**2 / r**2, axis=1, keepdims=True)
101 | p = p0 * np.sqrt(l2) # initial approximation (projection of points towards center of ellipsoid)
102 |
103 | fun = lambda x: np.sum((x.reshape(p0.shape) - p0)**2) # minimize distance between new and old points
104 | con = lambda x: np.sum(x.reshape(p0.shape)**2 / r**2, axis=1) - 1 # new points constrained to surface of ellipsoid
105 | res = sp.optimize.minimize(fun, p.ravel(), constraints={'type': 'eq', 'fun': con}, method='SLSQP')
106 |
107 | return res['x'].reshape(p0.shape) + c
108 |
109 |
--------------------------------------------------------------------------------
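
A minimal sketch of warp_locations on synthetic data (an ellipsoid with radii
1.0/1.2/0.9 plus noise; the numbers are illustrative):

    import numpy as np
    from scot.eegtopo.warp_layout import warp_locations

    rng = np.random.RandomState(42)
    p = rng.randn(32, 3)
    p /= np.sqrt(np.sum(p**2, axis=1, keepdims=True))     # points on the unit sphere
    locs = p * [1.0, 1.2, 0.9] + rng.randn(32, 3) * 0.01  # stretch and perturb
    warped = warp_locations(locs)
    print(np.abs(np.sum(warped**2, axis=1) - 1).max())    # ~0: back on the unit sphere
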
/scot/external/__init__.py:
--------------------------------------------------------------------------------
1 | """ External sources and code snippets
2 | """
3 |
4 | # Files
5 | # =====
6 | #
7 | # infomax_.py : (Extended) Infomax ICA, implemented in MNE-python, https://github.com/mne-tools/mne-python/blob/16d5538d76bf08278aa95b8546e45f4a5c19d42b/mne/preprocessing/infomax_.py
8 |
9 |
--------------------------------------------------------------------------------
/scot/matfiles.py:
--------------------------------------------------------------------------------
1 | """
2 | Summary
3 | -------
4 | Routines for loading and saving Matlab's .mat files.
5 | """
6 |
7 | from scipy.io import loadmat as sploadmat
8 | from scipy.io import savemat as spsavemat
9 | from scipy.io import matlab
10 |
11 |
12 | def loadmat(filename):
13 |     """Load a .mat file. Use this instead of scipy.io.loadmat directly;
14 |     it fixes the problem of not properly recovering Python dictionaries
15 |     from .mat files by calling `_check_keys` to convert all entries
16 |     that are still mat-objects.
17 |     """
18 | data = sploadmat(filename, struct_as_record=False, squeeze_me=True)
19 | return _check_keys(data)
20 |
21 |
22 | savemat = spsavemat
23 |
24 |
25 | def _check_keys(dictionary):
26 |     """
27 |     Check whether entries in the dictionary are mat-objects. If so,
28 |     `_todict` is called to convert them to nested dictionaries.
29 |     """
30 | for key in dictionary:
31 | if isinstance(dictionary[key], matlab.mio5_params.mat_struct):
32 | dictionary[key] = _todict(dictionary[key])
33 | return dictionary
34 |
35 |
36 | def _todict(matobj):
37 |     """
38 |     A recursive function that constructs nested dictionaries from mat-objects.
39 |     """
40 | dictionary = {}
41 | #noinspection PyProtectedMember
42 | for strg in matobj._fieldnames:
43 | elem = matobj.__dict__[strg]
44 | if isinstance(elem, matlab.mio5_params.mat_struct):
45 | dictionary[strg] = _todict(elem)
46 | else:
47 | dictionary[strg] = elem
48 | return dictionary
49 |
50 |
--------------------------------------------------------------------------------
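
A minimal sketch of loadmat (the file name is hypothetical; any .mat file containing
nested structs works):

    from scot.matfiles import loadmat

    mat = loadmat("motorimagery.mat")  # hypothetical file
    print(sorted(mat.keys()))          # mat-structs arrive as nested dictionaries
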
/scot/parallel.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2014 SCoT Development Team
4 |
5 | from __future__ import print_function
6 |
7 |
8 | def parallel_loop(func, n_jobs=1, verbose=1):
9 |     """Run loops in parallel if joblib is available.
10 |
11 | Parameters
12 | ----------
13 | func : function
14 | function to be executed in parallel
15 | n_jobs : int | None
16 | Number of jobs. If set to None, do not attempt to use joblib.
17 | verbose : int
18 | verbosity level
19 |
20 | Notes
21 | -----
22 | Execution of the main script must be guarded with `if __name__ == '__main__':` when using parallelization.
23 | """
24 | if n_jobs:
25 | try:
26 | from joblib import Parallel, delayed
27 | except ImportError:
28 | try:
29 | from sklearn.externals.joblib import Parallel, delayed
30 | except ImportError:
31 | n_jobs = None
32 |
33 | if not n_jobs:
34 | if verbose:
35 | print('running ', func, ' serially')
36 | par = lambda x: list(x)
37 | else:
38 | if verbose:
39 | print('running ', func, ' in parallel')
40 | func = delayed(func)
41 | par = Parallel(n_jobs=n_jobs, verbose=verbose)
42 |
43 | return par, func
44 |
--------------------------------------------------------------------------------
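
A minimal sketch of the parallel_loop calling convention (note the __main__ guard
required by the docstring; the same code runs serially if joblib is missing):

    from scot.parallel import parallel_loop

    def square(x):
        return x ** 2

    if __name__ == '__main__':
        par, func = parallel_loop(square, n_jobs=2, verbose=0)
        print(par(func(i) for i in range(10)))  # [0, 1, 4, ..., 81]
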
/scot/pca.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2016 SCoT Development Team
4 |
5 | """Principal component analysis (PCA) implementation."""
6 |
7 | import numpy as np
8 | from .datatools import cat_trials
9 |
10 |
11 | def pca_svd(x):
12 | """Calculate PCA using SVD.
13 |
14 | Parameters
15 | ----------
16 | x : ndarray, shape (channels, samples)
17 | Two-dimensional input data.
18 |
19 | Returns
20 | -------
21 | w : ndarray, shape (channels, channels)
22 | Eigenvectors (principal components) (in columns).
23 | s : ndarray, shape (channels,)
24 | Eigenvalues.
25 | """
26 | w, s, _ = np.linalg.svd(x, full_matrices=False)
27 | return w, s ** 2
28 |
29 |
30 | def pca_eig(x):
31 | """Calculate PCA using eigenvalue decomposition.
32 |
33 | Parameters
34 | ----------
35 | x : ndarray, shape (channels, samples)
36 | Two-dimensional input data.
37 |
38 | Returns
39 | -------
40 | w : ndarray, shape (channels, channels)
41 | Eigenvectors (principal components) (in columns).
42 | s : ndarray, shape (channels,)
43 | Eigenvalues.
44 | """
45 | s, w = np.linalg.eigh(x.dot(x.T))
46 | return w, s
47 |
48 |
49 | def pca(x, subtract_mean=False, normalize=False, sort_components=True,
50 | reducedim=None, algorithm=pca_eig):
51 | """Calculate principal component analysis (PCA).
52 |
53 | Parameters
54 | ----------
55 | x : ndarray, shape (trials, channels, samples) or (channels, samples)
56 | Input data.
57 | subtract_mean : bool, optional
58 | Subtract sample mean from x.
59 | normalize : bool, optional
60 | Normalize variances before applying PCA.
61 | sort_components : bool, optional
62 | Sort principal components in order of decreasing eigenvalues.
63 | reducedim : float or int or None, optional
64 | A value less than 1 is interpreted as the fraction of variance that
65 | should be retained in the data. All components that account for less
66 | than `1 - reducedim` of the variance are removed.
67 | An integer value of 1 or greater is interpreted as the number of
68 | (sorted) components to retain.
69 | If None, do not reduce dimensionality (i.e. keep all components).
70 | algorithm : func, optional
71 | Function to use for eigenvalue decomposition
72 | (:func:`pca_eig` or :func:`pca_svd`).
73 |
74 | Returns
75 | -------
76 | w : ndarray, shape (channels, components)
77 | PCA transformation matrix.
78 | v : ndarray, shape (components, channels)
79 | Inverse PCA transformation matrix.
80 | """
81 |
82 | x = np.asarray(x)
83 | if x.ndim == 3:
84 | x = cat_trials(x)
85 |
86 | if reducedim:
87 | sort_components = True
88 |
89 | if subtract_mean:
90 | x = x - np.mean(x, axis=1, keepdims=True)
91 |
92 | k, l = None, None
93 | if normalize:
94 | l = np.std(x, axis=1, ddof=1)
95 | k = np.diag(1.0 / l)
96 | l = np.diag(l)
97 | x = np.dot(k, x)
98 |
99 | w, latent = algorithm(x)
100 |
101 | # PCA is just a rotation, so inverse is equal to transpose
102 | v = w.T
103 |
104 | if normalize:
105 | w = np.dot(k, w)
106 | v = np.dot(v, l)
107 |
108 | latent /= sum(latent)
109 |
110 | if sort_components:
111 | order = np.argsort(latent)[::-1]
112 | w = w[:, order]
113 | v = v[order, :]
114 | latent = latent[order]
115 |
116 | if reducedim is not None:
117 | if reducedim < 1:
118 | selected = np.nonzero(np.cumsum(latent) < reducedim)[0]
119 | try:
120 | selected = np.concatenate([selected, [selected[-1] + 1]])
121 | except IndexError:
122 | selected = [0]
123 | if selected[-1] >= w.shape[1]:
124 | selected = selected[0:-1]
125 | w = w[:, selected]
126 | v = v[selected, :]
127 | else:
128 | w = w[:, :reducedim]
129 | v = v[:reducedim, :]
130 |
131 | return w, v
132 |
--------------------------------------------------------------------------------
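
A minimal sketch of pca with variance-based dimensionality reduction (the linearly
dependent channel is synthetic):

    import numpy as np
    from scot.pca import pca

    x = np.random.randn(4, 1000)   # 4 channels, 1000 samples
    x[3] = x[0] + x[1]             # a linearly dependent channel
    w, v = pca(x, reducedim=0.99)  # keep components covering 99% of the variance
    print(w.shape, v.shape)        # e.g. (4, 3) and (3, 4)
    y = v.dot(x)                   # project into component space
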
/scot/plainica.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | """ Source decomposition with ICA.
6 | """
7 |
8 | import numpy as np
9 |
10 | from . import backend as scotbackend
11 | from .datatools import cat_trials, atleast_3d
12 |
13 |
14 | class ResultICA(object):
15 | """ Result of :func:`plainica`
16 |
17 | Attributes
18 | ----------
19 | `mixing` : array
20 | estimate of the mixing matrix
21 | `unmixing` : array
22 | estimate of the unmixing matrix
23 | """
24 | def __init__(self, mx, ux):
25 | self.mixing = mx
26 | self.unmixing = ux
27 |
28 |
29 | def plainica(x, reducedim=0.99, backend=None, random_state=None):
30 | """ Source decomposition with ICA.
31 |
32 | Apply ICA to the data x, with optional PCA dimensionality reduction.
33 |
34 | Parameters
35 | ----------
36 | x : array, shape (n_trials, n_channels, n_samples) or (n_channels, n_samples)
37 | data set
38 |     reducedim : {int, float, 'no pca'}, optional
39 |         A number less than 1 is interpreted as the fraction of variance that should remain in the data. All
40 |         components that describe in total less than `1-reducedim` of the variance are removed by the PCA step.
41 |         An integer of 1 or greater is interpreted as the number of components to keep after applying the PCA.
42 |         If set to 'no pca' the PCA step is skipped.
43 | backend : dict-like, optional
44 | Specify backend to use. When set to None the backend configured in config.backend is used.
45 |
46 | Returns
47 | -------
48 | result : ResultICA
49 | Source decomposition
50 | """
51 |
52 | x = atleast_3d(x)
53 | t, m, l = np.shape(x)
54 |
55 | if backend is None:
56 | backend = scotbackend
57 |
58 | # pre-transform the data with PCA
59 | if reducedim == 'no pca':
60 | c = np.eye(m)
61 | d = np.eye(m)
62 | xpca = x
63 | else:
64 | c, d, xpca = backend['pca'](x, reducedim)
65 |
66 |     # apply ICA to the (PCA-transformed) data
67 | mx, ux = backend['ica'](cat_trials(xpca), random_state=random_state)
68 |
69 |     # correct (un)mixing matrix estimates
70 | mx = mx.dot(d)
71 | ux = c.dot(ux)
72 |
73 |     return ResultICA(mx, ux)
74 |
--------------------------------------------------------------------------------
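
A minimal sketch of plainica on random data (it assumes the package's configured
backend is importable; shapes depend on the PCA reduction):

    import numpy as np
    from scot.plainica import plainica

    x = np.random.randn(10, 5, 100)  # trials, channels, samples
    result = plainica(x, reducedim=0.99)
    print(result.mixing.shape, result.unmixing.shape)
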
/scot/scot.ini:
--------------------------------------------------------------------------------
1 | # This is the base configuration file for SCoT, and the
2 | # only configuration file that is guaranteed to be loaded.
3 | # Here, all options must be set to some default values.
4 |
5 | [scot]
6 | backend = builtin
7 | verbose = False
8 | data = ~/scot_data
9 |
--------------------------------------------------------------------------------
/scot/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013 SCoT Development Team
4 |
--------------------------------------------------------------------------------
/scot/tests/test_connectivity.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | import unittest
6 |
7 | import numpy as np
8 |
9 | from scot import connectivity
10 |
11 | from numpy.testing import assert_array_almost_equal
12 | from numpy.testing import assert_array_equal
13 |
14 |
15 | def assert_zerostructure(a, b):
16 | assert_array_equal(np.isclose(a, 0), np.isclose(b, 0))
17 |
18 |
19 | class TestFunctionality(unittest.TestCase):
20 | def setUp(self):
21 | pass
22 |
23 | def tearDown(self):
24 | pass
25 |
26 | def testFunction(self):
27 | # Three sources: a <- b <- c
28 | # simply test if connectivity measures are 0 where expected
29 | b0 = np.array([[0, 0.9, 0], [0, 0, 0.9], [0, 0, 0]])
30 | identity = np.eye(3)
31 | nfft = 5
32 | measures = ['A', 'H', 'COH', 'DTF', 'PDC']
33 | C = connectivity.Connectivity(b=b0, c=identity, nfft=nfft)
34 | c = connectivity.connectivity(measures, b=b0, c=identity, nfft=nfft)
35 | for m in measures:
36 | self.assertTrue(np.all(c[m] == getattr(C, m)()))
37 |
38 | def testClass(self):
39 |         # Three sources: a <- b <- c
40 | # simply test the structure of resulting connectivity measures
41 | b0 = np.array([[0, 0.9, 0], [0, 0, 0.9], [0, 0, 0]])
42 | identity = np.eye(3)
43 | nfft = 5
44 | c = connectivity.Connectivity(b=b0, c=identity, nfft=nfft)
45 | k = lambda x: np.sum(np.abs(x), 2)
46 | l = lambda x: np.sum(x, 2)
47 | # a should have the same structure as b
48 | assert_zerostructure(k(c.A()), b0 + identity)
49 | self.assertFalse(np.allclose(k(c.A()), k(c.A()).T))
50 | # H should be upper triangular
51 | self.assertTrue(np.allclose(np.tril(k(c.H()), -1), 0))
52 | self.assertFalse(np.all(k(c.H()) == k(c.H()).T))
53 | # S should be a full matrix and symmetric
54 | self.assertTrue(np.all(k(c.S()) > 0))
55 | self.assertTrue(np.allclose(k(c.S()), k(c.S()).T))
56 | # g should be nonzero for direct connections only and symmetric in
57 | # magnitude
58 | self.assertEqual(k(c.G())[0, 2], 0)
59 | self.assertTrue(np.allclose(k(c.G()), k(c.G()).T))
60 | # Phase should be zero along the diagonal
61 | self.assertTrue(np.allclose(k(c.PHI()).diagonal(), 0))
62 | # Phase should be antisymmetric
63 | self.assertTrue(np.allclose(l(c.PHI()), -l(c.PHI()).T))
64 | # Coherence should be 1 over all frequencies along the diagonal
65 | self.assertTrue(np.allclose(k(c.COH()).diagonal(), nfft))
66 | self.assertLessEqual(np.max(np.abs(c.COH())), 1)
67 | # pCOH should be nonzero for direct connections only and symmetric in
68 | # magnitude
69 | self.assertEqual(k(c.pCOH())[0, 2], 0)
70 | self.assertTrue(np.allclose(k(c.pCOH()), k(c.pCOH()).T))
71 | # PDC should have the same structure as b
72 | assert_zerostructure(k(c.PDC()), b0 + identity)
73 | self.assertFalse(np.allclose(l(c.PDC()), l(c.PDC()).T))
74 | # final sink should be 1 over all frequencies
75 | self.assertEqual(l(c.PDC())[0, 0], nfft)
76 | # sources with equal outgoing connections should be equal
77 | self.assertEqual(l(c.PDC())[1, 1], l(c.PDC())[2, 2])
78 | # equal connections in b should be equal
79 | self.assertEqual(l(c.PDC())[0, 1], l(c.PDC())[1, 2])
80 | # ffPDC should have the same structure as b
81 | assert_zerostructure(k(c.ffPDC()), b0 + identity)
82 | self.assertFalse(np.allclose(l(c.ffPDC()), l(c.ffPDC()).T))
83 | # sources with equal outgoing connections should be equal
84 | self.assertEqual(l(c.ffPDC())[1, 1], l(c.ffPDC())[2, 2])
85 | # equal connections in b should be equal
86 | self.assertEqual(l(c.ffPDC())[0, 1], l(c.ffPDC())[1, 2])
87 | # sPDC should be the square of the PDC
88 | self.assertTrue(np.allclose(c.PDC()**2, c.sPDC()))
89 | # sPDC should have the same structure as b
90 | assert_zerostructure(k(c.sPDC()), b0 + identity)
91 | self.assertFalse(np.allclose(l(c.sPDC()), l(c.sPDC()).T))
92 | # final sink should be 1 over all frequencies
93 | self.assertEqual(l(c.sPDC())[0, 0], nfft)
94 | # sources with equal outgoing connections should be equal
95 | self.assertEqual(l(c.sPDC())[1, 1], l(c.sPDC())[2, 2])
96 | # equal connections in b should be equal
97 | self.assertEqual(l(c.sPDC())[0, 1], l(c.sPDC())[1, 2])
98 | # PDCF should equal PDC for identity noise covariance
99 | self.assertTrue(np.allclose(c.PDC(), c.PDCF()))
100 | # GPDC should equal PDC for identity noise covariance
101 | self.assertTrue(np.allclose(c.PDC(), c.GPDC()))
102 | # DTF should be upper triangular
103 | self.assertTrue(np.allclose(np.tril(k(c.DTF()), -1), 0))
104 | self.assertFalse(np.allclose(k(c.DTF()), k(c.DTF()).T))
105 | # first source should be 1 over all frequencies
106 | self.assertEqual(l(c.DTF())[2, 2], nfft)
107 | # ffDTF should be upper triangular
108 | self.assertTrue(np.allclose(np.tril(k(c.ffDTF()), -1), 0))
109 | self.assertFalse(np.allclose(k(c.ffDTF()), k(c.ffDTF()).T))
110 | # dDTF should have the same structure as b,
111 | assert_zerostructure(k(c.dDTF()), b0 + identity)
112 | self.assertFalse(np.allclose(l(c.dDTF()), l(c.dDTF()).T))
113 | # GDTF should equal DTF for identity noise covariance
114 | self.assertTrue(np.allclose(c.DTF(), c.GDTF()))
115 |
--------------------------------------------------------------------------------
/scot/tests/test_csp.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | import unittest
6 |
7 | import numpy as np
8 | from numpy.testing import assert_allclose
9 |
10 | from scot.datatools import dot_special
11 | from scot.csp import csp
12 |
13 | try:
14 | from generate_testdata import generate_covsig
15 | except ImportError:
16 | from .generate_testdata import generate_covsig
17 |
18 | epsilon = 1e-10
19 |
20 |
21 | class TestFunctionality(unittest.TestCase):
22 | def setUp(self):
23 | pass
24 |
25 | def tearDown(self):
26 | pass
27 |
28 | def testComponentSeparation(self):
29 | A = generate_covsig([[10,5,2],[5,10,2],[2,2,10]], 500)
30 | B = generate_covsig([[10,2,2],[2,10,5],[2,5,10]], 500)
31 |
32 | X = np.concatenate([A[np.newaxis], B[np.newaxis]], axis=0)
33 | W, V = csp(X, [1, 2])
34 | C1a = np.cov(np.dot(W.T, X[0, :, :]))
35 | C2a = np.cov(np.dot(W.T, X[1, :, :]))
36 |
37 | Y = np.concatenate([B[np.newaxis], A[np.newaxis]], axis=0)
38 | W, V = csp(Y, [1, 2])
39 | C1b = np.cov(np.dot(W.T, Y[0, :, :]))
40 | C2b = np.cov(np.dot(W.T, Y[1, :, :]))
41 |
42 | # check symmetric case
43 | assert_allclose(C1a.diagonal(), C2a.diagonal()[::-1])
44 | assert_allclose(C1b.diagonal(), C2b.diagonal()[::-1])
45 |
46 | # swapping class labels (or in this case, trials) should not change the result
47 | assert_allclose(C1a, C1b, rtol=1e-9, atol=1e-9)
48 | assert_allclose(C2a, C2b, rtol=1e-9, atol=1e-9)
49 |
50 | # variance of first component should be greatest for class 1
51 | self.assertGreater(C1a[0, 0], C2a[0, 0])
52 |
53 |         # variance of last component should be greatest for class 2
54 | self.assertLess(C1a[2, 2], C2a[2, 2])
55 |
56 | # variance of central component should be equal for both classes
57 | assert_allclose(C1a[1, 1], C2a[1, 1])
58 |
59 |
60 | class TestDefaults(unittest.TestCase):
61 |
62 | def setUp(self):
63 | self.X = np.random.randn(10,5,100)
64 | self.C = [0,0,0,0,0,1,1,1,1,1]
65 | self.Y = self.X.copy()
66 | self.D = list(self.C)
67 | self.T, self.M, self.N = self.X.shape
68 | self.W, self.V = csp(self.X, self.C)
69 |
70 | def tearDown(self):
71 | pass
72 |
73 | def testInvalidInput(self):
74 |         # passing 2d data should raise an error
75 | self.assertRaises(AttributeError, csp, np.random.randn(3,10), [1,1,0,0] )
76 |
77 | # number of class labels does not match number of trials
78 | self.assertRaises(AttributeError, csp, np.random.randn(5,3,10), [1,1,0,0] )
79 |
80 | def testInputSafety(self):
81 | # function must not change input variables
82 | self.assertTrue((self.X == self.Y).all())
83 | self.assertEqual(self.C, self.D)
84 |
85 | def testOutputSizes(self):
86 | # output matrices must have the correct size
87 | self.assertTrue(self.W.shape == (self.M, self.M))
88 | self.assertTrue(self.V.shape == (self.M, self.M))
89 |
90 | def testInverse(self):
91 | # V should be the inverse of W
92 | I = np.abs(self.V.dot(self.W))
93 |
94 |         self.assertTrue(np.abs(np.mean(I.diagonal()) - 1) < epsilon)
95 | self.assertTrue(np.abs(np.sum(I) - I.trace()) < epsilon)
96 |
97 |
98 | class TestDimensionalityReduction(unittest.TestCase):
99 |
100 | def setUp(self):
101 | self.n_comps = 5
102 | self.X = np.random.rand(10,6,100)
103 | self.C = np.asarray([0,0,0,0,0,1,1,1,1,1])
104 | self.X[self.C == 0, 0, :] *= 10
105 | self.X[self.C == 0, 2, :] *= 5
106 | self.X[self.C == 1, 1, :] *= 10
107 | self.X[self.C == 1, 3, :] *= 2
108 | self.Y = self.X.copy()
109 | self.D = list(self.C)
110 | self.T, self.M, self.N = self.X.shape
111 | self.W, self.V = csp(self.X, self.C, numcomp=self.n_comps)
112 |
113 | def tearDown(self):
114 | pass
115 |
116 | def testOutputSizes(self):
117 | # output matrices must have the correct size
118 | self.assertTrue(self.W.shape == (self.M, 5))
119 | self.assertTrue(self.V.shape == (5, self.M))
120 |
121 | def testPseudoInverse(self):
122 | # V should be the pseudo inverse of W
123 | I = self.V.dot(self.W)
124 | assert_allclose(I, np.eye(self.n_comps), rtol=1e-9, atol=1e-9)
125 |
126 | def testOutput(self):
127 | x = dot_special(self.W.T, self.X)
128 | v1 = sum(np.var(x[np.array(self.C)==0], axis=2))
129 | v2 = sum(np.var(x[np.array(self.C)==1], axis=2))
130 | self.assertGreater(v1[0], v2[0])
131 | self.assertGreater(v1[1], v2[1])
132 | self.assertLess(v1[-2], v2[-2])
133 | self.assertLess(v1[-1], v2[-1])
134 |
--------------------------------------------------------------------------------
/scot/tests/test_datatools.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | from __future__ import division
6 |
7 | import unittest
8 | import numpy as np
9 |
10 | from scot import datatools
11 |
12 |
13 | class TestDataMangling(unittest.TestCase):
14 | def setUp(self):
15 | pass
16 |
17 | def tearDown(self):
18 | pass
19 |
20 | def test_cut_epochs(self):
21 | triggers = [100, 200, 300, 400, 500, 600, 700, 800, 900]
22 | rawdata = np.random.randn(5, 1000)
23 | rawcopy = rawdata.copy()
24 |
25 | start, stop = -10, 50
26 | x = datatools.cut_segments(rawdata, triggers, start, stop)
27 | self.assertTrue(np.all(rawdata == rawcopy))
28 | self.assertEqual(x.shape, (len(triggers), rawdata.shape[0], stop - start))
29 |
30 | # test if it works with float indices
31 | start, stop = -10.0, 50.0
32 | x = datatools.cut_segments(rawdata, triggers, start, stop)
33 |         self.assertEqual(x.shape, (len(triggers), rawdata.shape[0], int(stop) - int(start)))
34 |
35 | self.assertRaises(ValueError, datatools.cut_segments,
36 | rawdata, triggers, 0, 10.001)
37 | self.assertRaises(ValueError, datatools.cut_segments,
38 | rawdata, triggers, -10.1, 50)
39 |
40 | for it in range(len(triggers)):
41 | a = rawdata[:, triggers[it] + int(start): triggers[it] + int(stop)]
42 | b = x[it, :, :]
43 | self.assertTrue(np.all(a == b))
44 |
45 | def test_cat_trials(self):
46 | x = np.random.randn(9, 5, 60)
47 | xc = x.copy()
48 |
49 | y = datatools.cat_trials(x)
50 |
51 | self.assertTrue(np.all(x == xc))
52 | self.assertEqual(y.shape, (x.shape[1], x.shape[0] * x.shape[2]))
53 |
54 | for it in range(x.shape[0]):
55 | a = y[:, it * x.shape[2]: (it + 1) * x.shape[2]]
56 | b = x[it, :, :]
57 | self.assertTrue(np.all(a == b))
58 |
59 | def test_dot_special(self):
60 | x = np.random.randn(9, 5, 60)
61 | a = np.eye(5) * 2.0
62 |
63 | xc = x.copy()
64 | ac = a.copy()
65 |
66 | y = datatools.dot_special(a, x)
67 |
68 | self.assertTrue(np.all(x == xc))
69 | self.assertTrue(np.all(a == ac))
70 | self.assertTrue(np.all(x * 2 == y))
71 |
72 | x = np.random.randn(150, 40, 6)
73 | a = np.ones((7, 40))
74 | y = datatools.dot_special(a, x)
75 | self.assertEqual(y.shape, (150, 7, 6))
76 |
77 | def test_acm_1d(self):
78 | """Test autocorrelation matrix for 1D input"""
79 | v = np.array([1, 2, 0, 0, 1, 2, 0, 0])
80 | acm = lambda l: datatools.acm(v, l)
81 |
82 | self.assertEqual(np.mean(v**2), acm(0))
83 | for l in range(1, 6):
84 | self.assertEqual(np.correlate(v[l:], v[:-l]) / (len(v) - l),
85 | acm(l))
86 |
87 |
88 | class TestRegressions(unittest.TestCase):
89 | def setUp(self):
90 | pass
91 |
92 | def tearDown(self):
93 | pass
94 |
95 | def test_cat_trials_dimensions(self):
96 | """cat_trials did not always return a 2d array."""
97 | self.assertEqual(datatools.cat_trials(np.random.randn(2, 2, 100)).ndim, 2)
98 | self.assertEqual(datatools.cat_trials(np.random.randn(1, 2, 100)).ndim, 2)
99 | self.assertEqual(datatools.cat_trials(np.random.randn(2, 1, 100)).ndim, 2)
100 | self.assertEqual(datatools.cat_trials(np.random.randn(1, 1, 100)).ndim, 2)
101 |
--------------------------------------------------------------------------------
/scot/tests/test_eegtopo/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'billinger'
2 |
--------------------------------------------------------------------------------
/scot/tests/test_eegtopo/test_geometry/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'billinger'
2 |
--------------------------------------------------------------------------------
/scot/tests/test_eegtopo/test_geometry/test_euclidean.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2014 SCoT Development Team
4 |
5 | from __future__ import division
6 |
7 | import unittest
8 | from math import sqrt
9 |
10 | import numpy as np
11 |
12 | from scot.eegtopo.geo_euclidean import Vector
13 |
14 |
15 | class TestVector(unittest.TestCase):
16 | def setUp(self):
17 | pass
18 |
19 | def tearDown(self):
20 | pass
21 |
22 | def test_access(self):
23 | # Test __iter__ and __init__
24 |
25 | self.assertEqual(list(Vector()), [0, 0, 0])
26 | self.assertEqual(list(Vector(1, 2, 3)), [1, 2, 3])
27 | self.assertEqual(list(Vector(x=-4, y=-5, z=-6)), [-4, -5, -6])
28 |
29 | # Test alternative initialization
30 | self.assertEqual(list(Vector.fromiterable([7, 8, 9])), [7, 8, 9])
31 | self.assertEqual(list(Vector.fromvector(Vector(1, 2, 3))), [1, 2, 3])
32 |
33 | # Test __getitem__
34 | self.assertEqual(Vector(x=1)[0], 1)
35 | self.assertEqual(Vector(y=1)[1], 1)
36 | self.assertEqual(Vector(z=1)[2], 1)
37 |
38 | # Test __getattr__
39 | self.assertEqual(Vector(x=1).x, 1)
40 | self.assertEqual(Vector(y=1).y, 1)
41 | self.assertEqual(Vector(z=1).z, 1)
42 |
43 | # Test item assignment
44 | v = Vector()
45 | v[0], v[1], v[2] = 3, 4, 5
46 | self.assertEqual(list(v), [3, 4, 5])
47 |
48 | v.x, v.y, v.z = 6, 7, 8
49 | self.assertEqual(list(v), [6, 7, 8])
50 |
51 | # Test __repr__
52 | self.assertEqual(eval(repr(Vector(1, 2, 3))), Vector(1, 2, 3))
53 |
54 | # Basic Math
55 | self.assertEqual(Vector(1, 2, 3) + Vector(4, 5, 6), Vector(5, 7, 9))
56 | self.assertEqual(Vector(4, 5, 6) - Vector(1, 2, 3), Vector(3, 3, 3))
57 | self.assertEqual(Vector(1, 2, 3) * Vector(5, 4, 3), Vector(5, 8, 9))
58 | self.assertEqual(Vector(9, 8, 7) / Vector(3, 2, 1), Vector(3, 4, 7))
59 | self.assertEqual(Vector(1, 2, 3) + 1, Vector(2, 3, 4))
60 | self.assertEqual(Vector(4, 5, 6) - 1, Vector(3, 4, 5))
61 | self.assertEqual(Vector(1, 2, 3) * 2, Vector(2, 4, 6))
62 | self.assertEqual(Vector(4, 5, 6) / 2, Vector(2, 2.5, 3))
63 |
64 | # Inplace Math
65 | v = Vector(1, 1, 1)
66 | v += Vector(1, 2, 3)
67 | self.assertEqual(v, Vector(2, 3, 4))
68 | v -= Vector(-1, 1, 1)
69 | self.assertEqual(v, Vector(3, 2, 3))
70 | v *= Vector(1, 2, 3)
71 | self.assertEqual(v, Vector(3, 4, 9))
72 | v /= Vector(3, 2, 3)
73 | self.assertEqual(v, Vector(1, 2, 3))
74 | v -= 1
75 | self.assertEqual(v, Vector(0, 1, 2))
76 | v += 2
77 | self.assertEqual(v, Vector(2, 3, 4))
78 | v *= 2
79 | self.assertEqual(v, Vector(4, 6, 8))
80 | v /= 2
81 | self.assertEqual(v, Vector(2, 3, 4))
82 |
83 | # Vector Math
84 | self.assertEqual(Vector(1, 2, 3).dot(Vector(2, 2, 2)), 12)
85 | self.assertEqual(Vector(2, 0, 0).cross(Vector(0, 3, 0)), Vector(0, 0, 6))
86 | self.assertEqual(Vector(1, 2, 3).norm2(), 14)
87 | self.assertEqual(Vector(1, 2, 3).norm(), sqrt(14))
88 | self.assertTrue(np.allclose(Vector(8, 3, 9).normalize().norm2(), 1))
89 | self.assertTrue(np.allclose(Vector(-3, 1, 0).normalized().norm2(), 1))
90 |
91 | v = Vector(1, 0, 0)
92 | self.assertTrue(v.rotated(0.0*np.pi, Vector(0, 0, 1)).close(Vector(1, 0, 0)))
93 | self.assertTrue(v.rotated(0.5*np.pi, Vector(0, 0, 1)).close(Vector(0, 1, 0)))
94 | self.assertTrue(v.rotated(1.0*np.pi, Vector(0, 0, 1)).close(Vector(-1, 0, 0)))
95 | self.assertTrue(v.rotated(1.5*np.pi, Vector(0, 0, 1)).close(Vector(0, -1, 0)))
96 | self.assertTrue(v.rotated(2.0*np.pi, Vector(0, 0, 1)).close(Vector(1, 0, 0)))
97 |
98 | self.assertTrue(v.rotate(0.5*np.pi, Vector(0, 0, 1)).close(Vector(0, 1, 0)))
99 | self.assertTrue(v.rotate(0.5*np.pi, Vector(0, 0, 1)).close(Vector(-1, 0, 0)))
100 | self.assertTrue(v.rotate(0.5*np.pi, Vector(0, 0, 1)).close(Vector(0, -1, 0)))
101 | self.assertTrue(v.rotate(0.5*np.pi, Vector(0, 0, 1)).close(Vector(1, 0, 0)))
102 |
--------------------------------------------------------------------------------
/scot/tests/test_eegtopo/test_geometry/test_spherical.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2014 SCoT Development Team
4 |
5 | from __future__ import division
6 |
7 | import unittest
8 |
9 | import numpy as np
10 |
11 | from scot.eegtopo.geo_euclidean import Vector
12 | from scot.eegtopo.geo_spherical import Point, Line, Circle, Construct
13 |
14 |
15 | class TestClasses(unittest.TestCase):
16 | def setUp(self):
17 | pass
18 |
19 | def tearDown(self):
20 | pass
21 |
22 | def testPoint(self):
23 |         # Points must always lie on the unit sphere
24 | self.assertEqual(Point().vector.norm2(), 1)
25 | self.assertEqual(Point(1, 2, 3).vector.norm2(), 1)
26 | self.assertTrue(np.allclose(Point(10, -20, 30).vector.norm2(), 1))
27 | self.assertTrue(np.allclose(Point(100, 200, -300).vector.norm2(), 1))
28 | self.assertTrue(np.allclose(Point(-100000.0, 2, 300).vector.norm2(), 1))
29 |
30 | self.assertEqual(Point(1, 0, 0).distance(Point(0, 1, 0)), 0.5*np.pi)
31 | self.assertEqual(Point(1, 0, 0).distance(Point(0, 0, 1)), 0.5*np.pi)
32 | self.assertEqual(Point(0, 1, 0).distance(Point(0, 0, 1)), 0.5*np.pi)
33 | self.assertEqual(Point(0, 1, 0).distance(Point(1, 0, 0)), 0.5*np.pi)
34 | self.assertEqual(Point(0, 0, 1).distance(Point(1, 0, 0)), 0.5*np.pi)
35 |
36 | self.assertEqual(Point(1, 0, 0).distance(Point(-1, 0, 0)), np.pi)
37 |
38 | def testLine(self):
39 | self.assertTrue(Line(Point(1, 0, 0), Point(0, 1, 0)).get_point(0).vector.close(Vector(1, 0, 0)))
40 | self.assertTrue(Line(Point(1, 0, 0), Point(0, 1, 0)).get_point(1).vector.close(Vector(0, 1, 0)))
41 | self.assertTrue(Line(Point(1, 0, 0), Point(0, 1, 0)).get_point(2).vector.close(Vector(-1, 0, 0)))
42 | self.assertTrue(Line(Point(1, 0, 0), Point(0, 1, 0)).get_point(3).vector.close(Vector(0, -1, 0)))
43 | self.assertTrue(Line(Point(1, 0, 0), Point(0, 1, 0)).get_point(4).vector.close(Vector(1, 0, 0)))
44 |
45 | self.assertEqual(Line(Point(1, 0, 0), Point(0, 1, 0)).distance(Point(0, 0, 1)), 0.5*np.pi)
46 |
47 | def testCircle(self):
48 | self.assertEqual(Circle(Point(1, 0, 0), Point(0, 1, 0)).get_radius(), 0.5*np.pi) # circle radius measured on the surface
49 | self.assertEqual(Circle(Point(1, 0, 0), Point(0, 1, 0), Point(0, -1, 0)).get_radius(), 0.5*np.pi) # circle radius measured on the surface
50 |
51 | self.assertEqual(Circle(Point(1, 0, 0), Point(0, 1, 0)).angle(Point(0, 1, 0)), 0)
52 | self.assertEqual(Circle(Point(1, 0, 0), Point(0, 1, 0)).angle(Point(1, 0, 0)), 0.5*np.pi)
53 | self.assertEqual(Circle(Point(1, 0, 0), Point(0, 1, 0)).angle(Point(-1, 0, 0)), 0.5*np.pi)
54 | self.assertEqual(Circle(Point(1, 0, 0), Point(0, 1, 0)).angle(Point(0, -1, 0)), np.pi)
55 |
56 | self.assertEqual(Circle(Point(1, 0, 0), Point(0, 1, 0)).angle(Point(0, 0, -1)), 0.5*np.pi)
57 |         self.assertEqual(Circle(Point(1, 0, 0), Point(0, 1, 0)).angle(Point(0, 0, 1)), 0.5*np.pi)
58 |
59 | def testConstruct(self):
60 | self.assertTrue(Construct.midpoint(Point(1, 0, 0), Point(-1, 1e-10, 0)).vector.close(Vector(0, 1, 0)))
61 | self.assertTrue(Construct.midpoint(Point(1, 0, 0), Point(0, 0, 1)).vector.close(Point(1, 0, 1).vector))
62 |
63 | a = Line(Point(1, 0, 0), Point(0, 1, 0))
64 | b = Line(Point(1, 0, 0), Point(0, 0, 1))
65 | ab = Construct.line_intersect_line(a, b)
66 | self.assertEqual(ab[0].vector, Vector(1, 0, 0))
67 | self.assertEqual(ab[1].vector, Vector(-1, 0, 0))
68 |
69 | a = Line(Point(1, 0, 0), Point(0, 1, 0))
70 | b = Line(Point(0, 0, 1), Point(0, 1, 0))
71 | c = Circle(Point(0, 1, 0), Point(1, 0, 0))
72 | ac = Construct.line_intersect_circle(a, c)
73 | bc = Construct.line_intersect_circle(b, c)
74 | self.assertEqual(ac, None)
75 | self.assertEqual(bc[0].vector, Vector(0, 0, 1))
76 | self.assertEqual(bc[1].vector, Vector(0, 0, -1))
77 |
78 | a = Circle(Point(1, 0, 0), Point(0, 1, 0))
79 | b = Circle(Point(0, 1, 0), Point(0, 0, 1))
80 | ab = Construct.circle_intersect_circle(a, b)
81 | self.assertEqual(ab[0].vector, Vector(0, 0, 1))
82 | self.assertEqual(ab[1].vector, Vector(0, 0, -1))
83 |
84 | a = Circle(Point(1, 0, 0), Point(0, 1, 0))
85 | b = Circle(Point(0, 1, 0), Point(0, 1, 1))
86 | ab = Construct.circle_intersect_circle(a, b)
87 | self.assertTrue(ab[0].vector.close(Point(0, 1, 1).vector))
88 | self.assertTrue(ab[1].vector.close(Point(0, 1, -1).vector))
89 |
--------------------------------------------------------------------------------
/scot/tests/test_eegtopo/test_warp.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2014 SCoT Development Team
4 |
5 | import unittest
6 | import numpy as np
7 |
8 | from numpy.testing import assert_allclose
9 |
10 | from scot.eegtopo.warp_layout import warp_locations
11 | from scot.eegtopo.eegpos3d import positions as _eeglocs
12 |
13 |
14 | eeglocs = [p.list for p in _eeglocs]
15 |
16 |
17 | class TestWarpLocations(unittest.TestCase):
18 | def setUp(self):
19 | pass
20 |
21 | def tearDown(self):
22 | pass
23 |
24 | def test_interface(self):
25 | self.assertRaises(TypeError, warp_locations)
26 | self.assertEqual(warp_locations(np.eye(3)).shape, (3, 3)) # returns array
27 | self.assertEqual(len(warp_locations(np.eye(3), return_ellipsoid=True)), 3) # returns tuple
28 |
29 | def test_invariance(self):
30 | """unit-sphere locations should remain unchanged."""
31 | locs = [[1, 0, 0], [-1, 0, 0],
32 | [0, 1, 0], [0, -1, 0],
33 | [0, 0, 1], [0, 0, -1]]
34 | warp1 = warp_locations(locs)
35 | warp2, c, r = warp_locations(locs, return_ellipsoid=True)
36 |
37 | assert_allclose(warp1, locs, atol=1e-12)
38 | assert_allclose(warp2, locs, atol=1e-12)
39 | assert_allclose(c, 0, atol=1e-12)
40 | assert_allclose(r, 1, atol=1e-12)
41 |
42 | def test_eeglocations(self):
43 | np.random.seed(42)
44 |
45 | scale = np.random.rand(3) * 10 + 10
46 | displace = np.random.randn(3) * 100
47 | noise = np.random.randn(len(eeglocs), 3) * 5
48 |
49 | assert_allclose(warp_locations(eeglocs), eeglocs, atol=1e-10)
50 | assert_allclose(warp_locations(eeglocs * scale), eeglocs, atol=1e-10)
51 | assert_allclose(warp_locations(eeglocs * scale + displace), eeglocs, atol=1e-10)
52 | warp = warp_locations(eeglocs * scale + displace + noise)
53 |         assert_allclose(np.sum(warp**2, axis=1), 1, atol=1e-12)  # all locations on unit sphere
54 |
--------------------------------------------------------------------------------
/scot/tests/test_parallel.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2014 SCoT Development Team
4 |
5 | import unittest
6 |
7 | from scot.parallel import parallel_loop
8 |
9 |
10 | def f(x):
11 | return x**2 - 1
12 |
13 |
14 | def g(x, y, z):
15 | return x**y - z
16 |
17 |
18 | def h(x):
19 | return x, x**2
20 |
21 |
22 | class TestFunctions(unittest.TestCase):
23 | def setUp(self):
24 | pass
25 |
26 | def tearDown(self):
27 | pass
28 |
29 | def test_parallel_loop(self):
30 | verbose = 0
31 |
32 | # reference list comprehension
33 | ref = [f(i) for i in range(10)]
34 | reg = [g(i, j, 5) for i, j in enumerate(range(10, 20))]
35 | reh = [h(i) for i in range(10)]
36 |
37 | # test non-parallel execution
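        # parallel_loop returns a pair (par, func); judging by the messages
        # asserted in test_output below, func is wrapped with joblib's
        # delayed() when n_jobs is set, and par collects the generator of
        # calls (plain list construction in the serial case)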
38 | par, func = parallel_loop(f, n_jobs=None, verbose=verbose)
39 | self.assertEqual(ref, par(func(i) for i in range(10)))
40 | # multiple arguments
41 | par, func = parallel_loop(g, n_jobs=None, verbose=verbose)
42 | self.assertEqual(reg, par(func(i, j, 5) for i, j in enumerate(range(10, 20))))
43 | # multiple return values
44 | par, func = parallel_loop(h, n_jobs=None, verbose=verbose)
45 | self.assertEqual(reh, par(func(i) for i in range(10)))
46 |
47 | # test non-parallel execution with joblib
48 | par, func = parallel_loop(f, n_jobs=1, verbose=verbose)
49 | b = par(func(i) for i in range(10))
50 | self.assertEqual(ref, par(func(i) for i in range(10)))
51 | # multiple arguments
52 | par, func = parallel_loop(g, n_jobs=1, verbose=verbose)
53 | self.assertEqual(reg, par(func(i, j, 5) for i, j in enumerate(range(10, 20))))
54 | # multiple return values
55 | par, func = parallel_loop(h, n_jobs=1, verbose=verbose)
56 | self.assertEqual(reh, par(func(i) for i in range(10)))
57 |
58 | # test parallel execution with joblib
59 | par, func = parallel_loop(f, n_jobs=2, verbose=verbose)
60 | b = par(func(i) for i in range(10))
61 | self.assertEqual(ref, par(func(i) for i in range(10)))
62 | # multiple arguments
63 | par, func = parallel_loop(g, n_jobs=2, verbose=verbose)
64 | self.assertEqual(reg, par(func(i, j, 5) for i, j in enumerate(range(10, 20))))
65 | # multiple return values
66 | par, func = parallel_loop(h, n_jobs=2, verbose=verbose)
67 | self.assertEqual(reh, par(func(i) for i in range(10)))
68 |
69 | # test parallel execution with joblib
70 | par, func = parallel_loop(f, n_jobs=-1, verbose=verbose)
71 | b = par(func(i) for i in range(10))
72 | self.assertEqual(ref, par(func(i) for i in range(10)))
73 | # multiple arguments
74 | par, func = parallel_loop(g, n_jobs=-1, verbose=verbose)
75 | self.assertEqual(reg, par(func(i, j, 5) for i, j in enumerate(range(10, 20))))
76 | # multiple return values
77 | par, func = parallel_loop(h, n_jobs=-1, verbose=verbose)
78 | self.assertEqual(reh, par(func(i) for i in range(10)))
79 |
80 | # test parallel execution with joblib
81 | par, func = parallel_loop(f, n_jobs=10, verbose=verbose)
82 | b = par(func(i) for i in range(10))
83 | self.assertEqual(ref, par(func(i) for i in range(10)))
84 | # multiple arguments
85 | par, func = parallel_loop(g, n_jobs=10, verbose=verbose)
86 | self.assertEqual(reg, par(func(i, j, 5) for i, j in enumerate(range(10, 20))))
87 | # multiple return values
88 | par, func = parallel_loop(h, n_jobs=10, verbose=verbose)
89 | self.assertEqual(reh, par(func(i) for i in range(10)))
90 |
91 | def test_output(self):
92 | from sys import stdout
93 |
94 | if not hasattr(stdout, 'getvalue'):
95 | self.skipTest("cannot grab stdout")
96 |
97 | par, func = parallel_loop(f, n_jobs=None, verbose=10)
98 | self.assertEqual(stdout.getvalue().strip().split(' ')[-1], 'serially')
99 | par, func = parallel_loop(f, n_jobs=2, verbose=10)
100 | self.assertEqual(stdout.getvalue().strip().split(' ')[-1], 'parallel')
101 |
--------------------------------------------------------------------------------
/scot/tests/test_pca.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | import unittest
6 |
7 | import numpy as np
8 |
9 | from scot.pca import pca
10 |
11 | try:
12 | from generate_testdata import generate_covsig
13 | except ImportError:
14 | from .generate_testdata import generate_covsig
15 |
16 | epsilon = 1e-10
17 |
18 |
19 | class TestFunctionality(unittest.TestCase):
20 | def setUp(self):
21 | pass
22 |
23 | def tearDown(self):
24 | pass
25 |
26 | def testIdentity(self):
27 | """identity covariance in -> identity covariance out
28 |         test for 1 to 49 dimensions
29 | """
30 | for i in range(1, 50):
31 | x = generate_covsig(np.eye(i), 500)
32 | w, v = pca(x)
33 | c = np.cov(np.dot(w.T, x))
34 | self.assertTrue(np.allclose(c, np.eye(i)))
35 |
36 | def testSorting(self):
37 | """components should be sorted by decreasing variance
38 | """
39 | x = generate_covsig(np.diag([1, 9, 2, 6, 3, 8, 4, 5, 7]), 500)
40 | w, v = pca(x, sort_components=True)
41 | c = np.cov(np.dot(w.T, x))
42 | self.assertTrue(np.allclose(c, np.diag([9, 8, 7, 6, 5, 4, 3, 2, 1]), rtol=1e-1, atol=1e-2))
43 | w, v = pca(x, sort_components=True)
44 | c = np.cov(np.dot(w.T, x))
45 | self.assertTrue(np.allclose(c, np.diag([9, 8, 7, 6, 5, 4, 3, 2, 1]), rtol=1e-1, atol=1e-2))
46 |
47 | def testDecorrelation(self):
48 | """components should be decorrelated after PCA
49 | """
50 | x = generate_covsig([[3, 2, 1], [2, 3, 2], [1, 2, 3]], 500)
51 | w, v = pca(x)
52 | c = np.cov(np.dot(w.T, x))
53 | c -= np.diag(c.diagonal())
54 | self.assertTrue(np.allclose(c, np.zeros((3, 3)), rtol=1e-2, atol=1e-3))
55 |
56 |
57 | class TestDefaults(unittest.TestCase):
58 | def setUp(self):
59 | self.x = np.random.rand(10, 100)
60 | self.y = self.x.copy()
61 | self.m, self.n = self.x.shape
62 | self.w, self.v = pca(self.x)
63 |
64 | def tearDown(self):
65 | pass
66 |
67 | def testInputSafety(self):
68 | self.assertTrue((self.x == self.y).all())
69 |
70 | pca(self.x, subtract_mean=True, normalize=True)
71 | self.assertTrue((self.x == self.y).all())
72 |
73 | def testOutputSizes(self):
74 | self.assertTrue(self.w.shape == (self.m, self.m))
75 | self.assertTrue(self.v.shape == (self.m, self.m))
76 |
77 | def testInverse(self):
78 | i = np.abs(self.v.dot(self.w))
79 |         self.assertTrue(np.abs(np.mean(i.diagonal()) - 1) < epsilon)
80 | self.assertTrue(np.abs(np.sum(i) - i.trace()) < epsilon)
81 |
82 | w, v = pca(self.x, subtract_mean=True, normalize=True)
83 | i = np.abs(v.dot(w))
84 |         self.assertTrue(np.abs(np.mean(i.diagonal()) - 1) < epsilon)
85 | self.assertTrue(np.abs(np.sum(i) - i.trace()) < epsilon)
86 |
87 |
88 | class TestDimensionalityReduction(unittest.TestCase):
89 | def setUp(self):
90 | self.x = np.random.rand(10, 100)
91 | self.y = self.x.copy()
92 | self.m, self.n = self.x.shape
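        # reducedim appears to accept either a fraction of total variance to
        # retain (values < 1) or an explicit number of components; only the
        # integer case pins down the output sizes tested below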
93 | self.w1, self.v1 = pca(self.x, reducedim=0.9)
94 | self.w2, self.v2 = pca(self.x, reducedim=5)
95 |
96 | def tearDown(self):
97 | pass
98 |
99 | def testOutputSizes(self):
100 | self.assertTrue(self.w2.shape == (self.m, 5))
101 | self.assertTrue(self.v2.shape == (5, self.m))
102 |
103 | def testPseudoInverse(self):
104 | i = self.v1.dot(self.w1)
105 | self.assertTrue(np.abs(np.mean(i.diagonal()) - 1) < epsilon)
106 |
107 | i = self.w1.dot(self.v1)
108 | self.assertFalse(np.abs(np.mean(i.diagonal()) - 1) < epsilon)
109 |
110 | i = self.v2.dot(self.w2)
111 | self.assertTrue(np.abs(np.mean(i.diagonal()) - 1) < epsilon)
112 |
113 | i = self.w2.dot(self.v2)
114 | self.assertFalse(np.abs(np.mean(i.diagonal()) - 1) < epsilon)
115 |
116 | def testSorting(self):
117 | """components should be sorted by decreasing variance
118 | """
119 | x = generate_covsig(np.diag([1, 9, 2, 6, 3, 8, 4, 5, 7]), 500)
120 | w, v = pca(x, reducedim=5)
121 | c = np.cov(np.dot(w.T, x))
122 | self.assertTrue(np.allclose(c, np.diag([9, 8, 7, 6, 5]), rtol=1e-1, atol=1e-2))
123 |
--------------------------------------------------------------------------------
/scot/tests/test_plainica.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | import unittest
6 | from importlib import import_module
7 |
8 | import numpy as np
9 |
10 | import scot
11 | from scot import plainica, datatools
12 | from scot.var import VAR
13 |
14 |
15 | class TestICA(unittest.TestCase):
16 | def setUp(self):
17 | pass
18 |
19 | def tearDown(self):
20 | pass
21 |
22 | def testInterface(self):
23 | self.assertRaises(TypeError, plainica.plainica)
24 |         # simply pass in different data shapes and see if the function runs without error
25 | plainica.plainica(np.sin(np.arange(30)).reshape((10, 3))) # 10 samples, 3 channels
26 | plainica.plainica(np.sin(np.arange(30)).reshape((5, 3, 2))) # 5 samples, 3 channels, 2 trials
27 |
28 | def testModelIdentification(self):
29 | """ generate independent signals, mix them, and see if ICA can reconstruct the mixing matrix
30 | do this for every backend """
31 |
32 | # original model coefficients
33 | b0 = np.zeros((3, 3)) # no connectivity
34 | m0 = b0.shape[0]
35 | l, t = 100, 100
36 |
37 | # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
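        # (cubing gaussian samples produces a heavy-tailed, super-gaussian
        # distribution, which supplies the non-gaussianity ICA relies on)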
38 | noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3 / 1e3
39 |
40 | var = VAR(1)
41 | var.coef = b0
42 | sources = var.simulate([l, t], noisefunc)
43 |
44 | # simulate volume conduction... 3 sources measured with 7 channels
45 | mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
46 | [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
47 | [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
48 | data = datatools.dot_special(np.transpose(mix), sources)
49 |
50 | for backend_name, backend_gen in scot.backend.items():
51 |
52 | result = plainica.plainica(data, backend=backend_gen())
53 |
54 | i = result.mixing.dot(result.unmixing)
55 | self.assertTrue(np.allclose(i, np.eye(i.shape[0]), rtol=1e-6, atol=1e-7))
56 |
57 | permutations = [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]
58 |
59 | bestdiff = np.inf
60 | bestmix = None
61 |
62 | absmix = np.abs(result.mixing)
63 | absmix /= np.max(absmix)
64 |
65 | for p in permutations:
66 | estmix = absmix[p, :]
67 | diff = np.sum((np.abs(estmix) - np.abs(mix)) ** 2)
68 |
69 | if diff < bestdiff:
70 | bestdiff = diff
71 | bestmix = estmix
72 |
73 | self.assertTrue(np.allclose(bestmix, mix, rtol=1e-1, atol=1e-1))
74 |
--------------------------------------------------------------------------------
/scot/tests/test_plotting.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013 SCoT Development Team
4 |
5 | import unittest
6 |
7 | import numpy as np
8 | import matplotlib.pyplot as plt
9 | from matplotlib.image import AxesImage
10 | from matplotlib.figure import Figure
11 |
12 | from scot.eegtopo.topoplot import Topoplot
13 | from scot import plotting as sp
14 | from scot.varbase import VARBase
15 |
16 |
17 | class TestFunctionality(unittest.TestCase):
18 | def setUp(self):
19 | self.locs = [[0, 0, 1], [1, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0]]
20 | self.vals = [[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1] ]
21 |
22 | self.topo = Topoplot()
23 | self.topo.set_locations(self.locs)
24 | self.maps = sp.prepare_topoplots(self.topo, self.vals)
25 |
26 | def tearDown(self):
27 | plt.close('all')
28 |
29 | def test_topoplots(self):
30 | locs, vals, topo, maps = self.locs, self.vals, self.topo, self.maps
31 |
32 |         self.assertEqual(len(maps), len(vals))  # should get one topo map per row of vals
33 |
34 |         self.assertTrue(np.allclose(maps[0], maps[0].T))  # first map: should be symmetric about the diagonal (blob in the middle)
35 |         self.assertTrue(np.all(maps[1] == 0))  # second map: should be all zeros
36 |         self.assertTrue(np.all(maps[2] == 1))  # third map: should be all ones
37 |
38 | #--------------------------------------------------------------------
39 |
40 | a1 = sp.plot_topo(plt.gca(), topo, maps[0])
41 | a2 = sp.plot_topo(plt.gca(), topo, maps[0], crange=[-1, 1], offset=(1, 1))
42 |
43 | self.assertIsInstance(a1, AxesImage)
44 | self.assertIsInstance(a2, AxesImage)
45 |
46 | #--------------------------------------------------------------------
47 |
48 | f1 = sp.plot_sources(topo, maps, maps)
49 | f2 = sp.plot_sources(topo, maps, maps, 90, f1)
50 |
51 | self.assertIs(f1, f2)
52 | self.assertIsInstance(f1, Figure)
53 |
54 | #--------------------------------------------------------------------
55 |
56 | f1 = sp.plot_connectivity_topos(topo=topo, topomaps=maps, layout='diagonal')
57 | f2 = sp.plot_connectivity_topos(topo=topo, topomaps=maps, layout='somethingelse')
58 |
59 | self.assertEqual(len(f1.axes), len(vals))
60 | self.assertEqual(len(f2.axes), len(vals)*2)
61 |
62 | def test_connectivity_spectrum(self):
63 | a = np.array([[[0, 0], [0, 1], [0, 2]],
64 | [[1, 0], [1, 1], [1, 2]],
65 | [[2, 0], [2, 1], [2, 2]]])
66 | f = sp.plot_connectivity_spectrum(a, diagonal=0)
67 | self.assertIsInstance(f, Figure)
68 | self.assertEqual(len(f.axes), 9)
69 |
70 | f = sp.plot_connectivity_spectrum(a, diagonal=1)
71 | self.assertEqual(len(f.axes), 3)
72 |
73 | f = sp.plot_connectivity_spectrum(a, diagonal=-1)
74 | self.assertEqual(len(f.axes), 6)
75 |
76 | def test_connectivity_significance(self):
77 | a = np.array([[[0, 0], [0, 1], [0, 2]],
78 | [[1, 0], [1, 1], [1, 2]],
79 | [[2, 0], [2, 1], [2, 2]]])
80 | f = sp.plot_connectivity_significance(a, diagonal=0)
81 | self.assertIsInstance(f, Figure)
82 | self.assertEqual(len(f.axes), 9)
83 |
84 | f = sp.plot_connectivity_significance(a, diagonal=1)
85 | self.assertEqual(len(f.axes), 3)
86 |
87 | f = sp.plot_connectivity_significance(a, diagonal=-1)
88 | self.assertEqual(len(f.axes), 6)
89 |
90 | def test_connectivity_timespectrum(self):
91 | a = np.array([[[[0, 0], [0, 1], [0, 2]],
92 | [[1, 0], [1, 1], [1, 2]],
93 | [[2, 0], [2, 1], [2, 2]]]]).repeat(4, 0).transpose([1,2,3,0])
94 | f = sp.plot_connectivity_timespectrum(a, diagonal=0)
95 | self.assertIsInstance(f, Figure)
96 | self.assertEqual(len(f.axes), 9)
97 |
98 | f = sp.plot_connectivity_timespectrum(a, diagonal=1)
99 | self.assertEqual(len(f.axes), 3)
100 |
101 | f = sp.plot_connectivity_timespectrum(a, diagonal=-1)
102 | self.assertEqual(len(f.axes), 6)
103 |
104 | def test_circular(self):
105 | w = [[1, 1, 1],
106 | [1, 1, 1],
107 | [1, 1, 1]]
108 | c = [[[1, 1, 1], [1, 1, 1], [1, 1, 1]],
109 | [[1, 1, 1], [1, 1, 1], [1, 1, 1]],
110 | [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]
111 |
112 | sp.plot_circular(1, [1, 1, 1], topo=self.topo, topomaps=self.maps)
113 | sp.plot_circular(w, [1, 1, 1], topo=self.topo, topomaps=self.maps)
114 | sp.plot_circular(1, c, topo=self.topo, topomaps=self.maps)
115 | sp.plot_circular(w, c, topo=self.topo, topomaps=self.maps)
116 | sp.plot_circular(w, c, mask=False, topo=self.topo, topomaps=self.maps)
117 |
118 | def test_whiteness(self):
119 | np.random.seed(91)
120 |
121 | var = VARBase(0)
122 | var.residuals = np.random.randn(10, 5, 100)
123 |
124 | pr = sp.plot_whiteness(var, 20, repeats=100)
125 |
126 | self.assertGreater(pr, 0.05)
127 |
--------------------------------------------------------------------------------
/scot/tests/test_statistics.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2014-2015 SCoT Development Team
4 |
5 | import unittest
6 |
7 | import numpy as np
8 |
9 | from scot.var import VAR
10 | import scot.connectivity_statistics as cs
11 |
12 |
13 | class TestFunctions(unittest.TestCase):
14 | def setUp(self):
15 | pass
16 |
17 | def tearDown(self):
18 | pass
19 |
20 | @staticmethod
21 | def generate_data():
22 | var = VAR(2)
23 | var.coef = np.array([[0.2, 0.1, 0, 0], [0.7, -0.4, 0.1, 0]])
24 | l = (100, 100)
25 | x = var.simulate(l)
26 | return x, var
27 |
28 | def test_surrogate(self):
29 | np.random.seed(31415)
30 | x, var0 = self.generate_data()
31 |
32 | result = cs.surrogate_connectivity('PDC', x, VAR(2), nfft=4,
33 | repeats=100)
34 | self.assertEqual(result.shape, (100, 2, 2, 4))
35 |
36 | structure = np.mean(np.mean(result, axis=3), axis=0)
37 | self.assertTrue(np.all(np.abs(structure-np.eye(2)) < 0.05))
38 |
39 | def test_jackknife(self):
40 | np.random.seed(31415)
41 | x, var0 = self.generate_data()
42 |
43 | result = cs.jackknife_connectivity('PDC', x, VAR(2), nfft=4,
44 | leaveout=1)
45 | self.assertEqual(result.shape, (100, 2, 2, 4))
46 |
47 | structure = np.mean(np.mean(result, axis=3), axis=0)
48 | # make sure result has roughly the correct structure
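        # (in the simulated model only channel 1 drives channel 2, so the one
        # substantial off-diagonal entry sits at [1, 0])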
49 | self.assertTrue(np.all(np.abs(structure-[[1, 0], [0.5, 1]]) < 0.25))
50 |
51 | def test_bootstrap(self):
52 | np.random.seed(31415)
53 | x, var0 = self.generate_data()
54 |
55 | result = cs.bootstrap_connectivity('PDC', x, VAR(2), nfft=4,
56 | repeats=100)
57 | self.assertEqual(result.shape, (100, 2, 2, 4))
58 |
59 | structure = np.mean(np.mean(result, axis=3), axis=0)
60 | # make sure result has roughly the correct structure
61 | self.assertTrue(np.all(np.abs(structure - [[1, 0], [0.5, 1]]) < 0.25))
62 |
63 | def test_bootstrap_difference_and_fdr(self):
64 | # Generate reference data
65 | np.random.seed(31415)
66 | x, var0 = self.generate_data()
67 | a = cs.bootstrap_connectivity('PDC', x, VAR(2), nfft=4, repeats=100)
68 |
69 | # Similar to reference data ==> no significant differences expected
70 | np.random.seed(12345)
71 | x, var0 = self.generate_data()
72 | b = cs.bootstrap_connectivity('PDC', x, VAR(2), nfft=4, repeats=100)
73 | p = cs.test_bootstrap_difference(a, b)
74 | self.assertFalse(np.any(p < 0.01)) # TODO: np.all?
75 | self.assertFalse(np.any(cs.significance_fdr(p, 0.05))) # TODO: np.all?
76 |
77 | # Trials rearranged ==> no significant differences expected
78 | np.random.seed(12345)
79 | x, var0 = self.generate_data()
80 | b = cs.bootstrap_connectivity('PDC', x[::-1, :, :], VAR(2), nfft=4,
81 | repeats=100)
82 | p = cs.test_bootstrap_difference(a, b)
83 | self.assertFalse(np.any(p < 0.01))
84 | self.assertFalse(np.any(cs.significance_fdr(p, 0.05)))
85 |
86 | # Channels rearranged ==> highly significant differences expected
87 | np.random.seed(12345)
88 | x, var0 = self.generate_data()
89 | b = cs.bootstrap_connectivity('PDC', x[1, ::-1, :], VAR(2), nfft=4,
90 | repeats=100)
91 | p = cs.test_bootstrap_difference(a, b)
92 | self.assertTrue(np.all(p < 0.0001))
93 | self.assertTrue(np.all(cs.significance_fdr(p, 0.01)))
94 |
95 | # Time reversed ==> highly significant differences expected
96 | np.random.seed(12345)
97 | x, var0 = self.generate_data()
98 | b = cs.bootstrap_connectivity('PDC', x[1, :, ::-1], VAR(2), nfft=4,
99 | repeats=100)
100 | p = cs.test_bootstrap_difference(a, b)
101 | self.assertTrue(np.all(p < 0.0001))
102 | self.assertTrue(np.all(cs.significance_fdr(p, 0.01)))
103 |
--------------------------------------------------------------------------------
/scot/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | from __future__ import division
6 |
7 | import unittest
8 |
9 | import numpy as np
10 |
11 | import scot
12 | import scot.datatools
13 | from scot import utils
14 |
15 |
16 | class TestUtils(unittest.TestCase):
17 | def setUp(self):
18 | pass
19 |
20 | def tearDown(self):
21 | pass
22 |
23 | def test_memoize(self):
24 | class Obj(object):
25 | @scot.utils.memoize
26 | def add_to(self, arg):
27 | return self + arg
28 | self.assertRaises(TypeError, Obj.add_to, 1)
29 | self.assertEqual(3, Obj.add_to(1, 2))
30 | self.assertEqual(3, Obj.add_to(1, 2))
31 |
32 | class Obj(object):
33 | @scot.utils.memoize
34 | def squareone(self, a):
35 | return a * a + 1
36 | obj = Obj()
37 | self.assertEqual(2, obj.squareone(1))
38 | self.assertEqual(2, obj.squareone(1))
39 | self.assertEqual(5, obj.squareone(2))
40 | self.assertEqual(10, obj.squareone(3))
41 | self.assertEqual(5, obj.squareone(2))
42 | self.assertEqual(10, obj.squareone(3))
43 |
44 | def test_cuthill(self):
45 | A = np.array([[0,0,1,1], [0,0,0,0], [1,0,1,0], [1,0,0,0]])
46 | p = scot.utils.cuthill_mckee(A)
47 | self.assertEqual(p, [1, 3, 0, 2])
48 |
--------------------------------------------------------------------------------
/scot/tests/test_var.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | import unittest
6 |
7 | import numpy as np
8 | from numpy.testing import assert_allclose
9 |
10 | from scot.varbase import VARBase as VAR
11 | from scot.datatools import acm
12 |
13 | epsilon = 1e-10
14 |
15 |
16 | class TestVAR(unittest.TestCase):
17 | def setUp(self):
18 | pass
19 |
20 | def tearDown(self):
21 | pass
22 |
23 | def generate_data(self, cc=((1, 0), (0, 1))):
24 | var = VAR(2)
25 | var.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
26 | l = (1000, 100)
27 | x = var.simulate(l, lambda: np.random.randn(2).dot(cc))
28 | self.assertEqual(x.shape, (l[1], 2, l[0]))
29 | return x, var
30 |
31 | def test_abstract(self):
32 | self.assertRaises(NotImplementedError, VAR(1).fit, [None])
33 | self.assertRaises(NotImplementedError, VAR(1).optimize, [None])
34 |
35 | def test_simulate(self):
36 | noisefunc = lambda: [1, 1] # use deterministic function instead of noise
37 | num_samples = 100
38 |
39 | b = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
40 |
41 | var = VAR(2)
42 | var.coef = b
43 |
44 | np.random.seed(42)
45 | x = var.simulate(num_samples, noisefunc)
46 | self.assertEqual(x.shape, (1, b.shape[0], num_samples))
47 |
48 | # make sure we got expected values within reasonable accuracy
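        # with the constant innovation [1, 1] every sample must satisfy the
        # VAR(2) recursion x[n] = 1 + B1 x[n-1] + B2 x[n-2]; the lag-1 and
        # lag-2 coefficients are interleaved as B1 = b[:, 0::2] and
        # B2 = b[:, 1::2]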
49 | for n in range(10, num_samples):
50 | self.assertTrue(np.all(
51 | np.abs(x[0, :, n] - 1
52 | - np.dot(x[0, :, n - 1], b[:, 0::2].T)
53 | - np.dot(x[0, :, n - 2], b[:, 1::2].T)) < 1e-10))
54 |
55 | def test_predict(self):
56 | np.random.seed(777)
57 | x, var = self.generate_data()
58 | z = var.predict(x)
59 | self.assertTrue(np.abs(np.var(x[:, :, 100:] - z[:, :, 100:]) - 1) < 0.005)
60 |
61 | def test_yulewalker(self):
62 | np.random.seed(7353)
63 | x, var0 = self.generate_data([[1, 2], [3, 4]])
64 |
65 | acms = [acm(x, l) for l in range(var0.p+1)]
66 |
67 | var = VAR(var0.p)
68 | var.from_yw(acms)
69 |
70 | assert_allclose(var0.coef, var.coef, rtol=1e-2, atol=1e-2)
71 |
72 | # that limit is rather generous, but we don't want tests to fail due to random variation
73 | self.assertTrue(np.all(np.abs(var0.coef - var.coef) < 0.02))
74 | self.assertTrue(np.all(np.abs(var0.rescov - var.rescov) < 0.02))
75 |
76 | def test_whiteness(self):
77 | np.random.seed(91)
78 | r = np.random.randn(80, 15, 100) # gaussian white noise
79 | r0 = r.copy()
80 |
81 | var = VAR(0, n_jobs=-1)
82 | var.residuals = r
83 |
84 | p = var.test_whiteness(20, random_state=1)
85 |
86 | self.assertTrue(np.all(r == r0)) # make sure we don't modify the input
87 | self.assertGreater(p, 0.01) # test should be non-significant for white noise
88 |
89 | r[:, 1, 3:] = r[:, 0, :-3] # create cross-correlation at lag 3
90 | p = var.test_whiteness(20)
91 | self.assertLessEqual(p, 0.01) # now test should be significant
92 |
93 | def test_stable(self):
94 | var = VAR(1)
95 |
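        # A VAR model is stable iff all eigenvalues of its companion matrix
        # lie strictly inside the unit circle; the coefficient sums below are
        # merely an easy way to construct examples on either side of that
        # boundary.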
96 | # Stable AR model -- rule of thumb: sum(coefs) < 1
97 | var.coef = np.asarray([[0.5, 0.3]])
98 | self.assertTrue(var.is_stable())
99 |
100 | # Unstable AR model -- rule of thumb: sum(coefs) > 1
101 | var.coef = np.asarray([[0.5, 0.7]])
102 | self.assertFalse(var.is_stable())
103 |
--------------------------------------------------------------------------------
/scot/tests/test_var_builtin.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | import unittest
6 |
7 | import numpy as np
8 |
9 | from scot.var import VAR
10 |
11 | epsilon = 1e-10
12 |
13 |
14 | class TestVAR(unittest.TestCase):
15 | def setUp(self):
16 | pass
17 |
18 | def tearDown(self):
19 | pass
20 |
21 | def test_fit(self):
22 | var0 = VAR(2)
23 | var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
24 | l = 100000
25 | x = var0.simulate(l)
26 | y = x.copy()
27 |
28 | var = VAR(2)
29 | var.fit(x)
30 |
31 | # make sure the input remains unchanged
32 | self.assertTrue(np.all(x == y))
33 |
34 | # that limit is rather generous, but we don't want tests to fail due to random variation
35 | self.assertTrue(np.all(np.abs(var0.coef - var.coef) < 0.02))
36 |
37 | def test_fit_regularized(self):
38 | l = 100000
39 | var0 = VAR(2)
40 | var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
41 | x = var0.simulate(l)
42 | y = x.copy()
43 |
44 | var = VAR(10, delta=1)
45 | var.fit(x)
46 |
47 | # make sure the input remains unchanged
48 | self.assertTrue(np.all(x == y))
49 |
50 | b0 = np.zeros((2, 20))
51 | b0[:, 0:2] = var0.coef[:, 0:2]
52 | b0[:, 10:12] = var0.coef[:, 2:4]
53 |
54 | # that limit is rather generous, but we don't want tests to fail due to random variation
55 | self.assertTrue(np.all(np.abs(b0 - var.coef) < 0.02))
56 |
57 | def test_residuals(self):
58 | l = 100000
59 | var0 = VAR(2)
60 | var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
61 | x = var0.simulate(l)
62 |
63 | var = VAR(2)
64 | var.fit(x)
65 |
66 | self.assertEqual(x.shape, var.residuals.shape)
67 |
68 | self.assertTrue(np.allclose(var.rescov, np.eye(var.rescov.shape[0]), 1e-2, 1e-2))
69 |
70 | def test_optimize(self):
71 | np.random.seed(745)
72 | var0 = VAR(2)
73 | var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
74 | l = (100, 10)
75 | x = var0.simulate(l)
76 |
77 | for n_jobs in [None, -1, 1, 2]:
78 | var = VAR(-1, n_jobs=n_jobs, verbose=0)
79 |
80 | var.optimize_order(x)
81 | self.assertEqual(var.p, 2)
82 |
83 | var.optimize_order(x, min_p=1, max_p=1)
84 | self.assertEqual(var.p, 1)
85 |
86 | def test_bisection_overdetermined(self):
87 | np.random.seed(42)
88 | var0 = VAR(2)
89 | var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
90 | l = (100, 10)
91 | x = var0.simulate(l)
92 |
93 | var = VAR(2)
94 | var.optimize_delta_bisection(x)
95 |
96 | # nice data, so the regularization should not be too strong.
97 | self.assertLess(var.delta, 10)
98 |
99 | def test_bisection_underdetermined(self):
100 | n_trials, n_samples = 10, 10
101 | np.random.seed(42)
102 | var0 = VAR(2)
103 | var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
104 | x = var0.simulate((n_samples, n_trials))
105 | x = np.concatenate([x, np.random.randn(n_trials, 8, n_samples)], axis=1)
106 |
107 | var = VAR(7)
108 | var.optimize_delta_bisection(x)
109 |
110 | # nice data, so the regularization should not be too weak.
111 | self.assertGreater(var.delta, 10)
112 |
113 | def test_bisection_invalid(self):
114 | np.random.seed(42)
115 | x = np.random.randn(10, 100, 10)
116 |
117 | var = VAR(1)
118 | var.optimize_delta_bisection(x)
119 |
120 | # totally ugly data, should be unable to find reasonable regularization.
121 | self.assertEqual(var.delta, 0)
122 |
--------------------------------------------------------------------------------
/scot/tests/test_var_sklearn.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | import unittest
6 |
7 | from numpy.testing import assert_array_almost_equal, assert_equal
8 |
9 | import numpy as np
10 | from sklearn.linear_model import Ridge, RidgeCV, Lasso, LassoLars, ElasticNet
11 |
12 | from scot.backend_sklearn import generate
13 |
14 |
15 | backend_sklearn = generate()
16 | VAR = backend_sklearn['var']
17 |
18 |
19 | class CommonTests(unittest.TestCase):
20 | def setUp(self):
21 | np.random.seed(12345)
22 | self.var0 = VAR(2)
23 | self.var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
24 | self.x = self.var0.simulate((1000, 100))
25 | self.var = VAR(2)
26 |
27 | def tearDown(self):
28 | pass
29 |
30 | def test_fit(self):
31 | self.var.fit(self.x)
32 |
33 | b0 = np.zeros_like(self.var.coef)
34 | b0[:, 0: 2] = self.var0.coef[:, 0:2]
35 | b0[:, self.var.p: self.var.p + 2] = self.var0.coef[:, 2: 4]
36 |
37 | assert_array_almost_equal(b0, self.var.coef, decimal=2)
38 | self.assertEqual(self.x.shape, self.var.residuals.shape)
39 | assert_array_almost_equal(self.var.rescov,
40 | np.eye(self.var.rescov.shape[0]), decimal=2)
41 |
42 |
43 | class TestRidge(CommonTests):
44 | def setUp(self):
45 | super(TestRidge, self).setUp()
46 | self.var = VAR(10, Ridge(alpha=100))
47 |
48 |
49 | class TestRidgeCV(CommonTests):
50 | def setUp(self):
51 | super(TestRidgeCV, self).setUp()
52 | # Provide three candidates for alpha.
53 | self.var = VAR(10, RidgeCV(alphas=[10, 100, 1000]))
54 |
55 | def test_alpha(self):
56 | """ This test checks if RidgeCV finds the optimal `alpha`.
57 | """
58 | self.var.fit(self.x)
59 | # Currently we simply *know* empirically that from the three
60 | # candidate alphas 100 is closest to the optimum.
61 | # TODO: programmatically derive the optimum from the data
62 | assert_equal(self.var.fitting_model.alpha_, 100)
63 |
64 |
65 | class TestLasso(CommonTests):
66 | def setUp(self):
67 | super(TestLasso, self).setUp()
68 | self.var = VAR(10, Lasso(alpha=0.001))
69 |
70 |
71 | class TestLassoLars(CommonTests):
72 | def setUp(self):
73 | super(TestLassoLars, self).setUp()
74 | self.var = VAR(10, LassoLars(alpha=0.00001))
75 |
76 |
77 | class TestElasticNet(CommonTests):
78 | def setUp(self):
79 | super(TestElasticNet, self).setUp()
80 | self.var = VAR(10, ElasticNet(alpha=0.01, l1_ratio=0.5))
81 |
--------------------------------------------------------------------------------
/scot/tests/test_varica.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | import unittest
6 | from importlib import import_module
7 |
8 | import numpy as np
9 | from numpy.testing import assert_allclose
10 |
11 | import scot
12 | from scot import varica, datatools
13 | from scot.var import VAR
14 |
15 |
16 | class TestMVARICA(unittest.TestCase):
17 | def setUp(self):
18 | pass
19 |
20 | def tearDown(self):
21 | pass
22 |
23 | def testInterface(self):
24 | self.assertRaises(TypeError, varica.mvarica)
25 |         # simply pass in different data shapes and see if the function runs without error
26 | varica.mvarica(np.sin(np.arange(30)).reshape((10, 3)), VAR(1)) # 10 samples, 3 channels
27 | varica.mvarica(np.sin(np.arange(30)).reshape((5, 3, 2)), VAR(1)) # 5 samples, 3 channels, 2 trials
28 |
29 | def testFit(self):
30 | """ Test submodel fitting on instationary data
31 | """
32 | np.random.seed(42)
33 |
34 | # original model coefficients
35 | b01 = np.array([[0.0, 0], [0, 0]])
36 | b02 = np.array([[0.5, 0.3], [0.3, 0.5]])
37 | b03 = np.array([[0.1, 0.1], [0.1, 0.1]])
38 | t, m, l = 10, 2, 100
39 |
40 | noisefunc = lambda: np.random.normal(size=(1, m)) ** 3 / 1e3
41 |
42 | var = VAR(1)
43 | var.coef = b01
44 | sources1 = var.simulate([l, t], noisefunc)
45 | var.coef = b02
46 | sources2 = var.simulate([l, t], noisefunc)
47 | var.coef = b03
48 | sources3 = var.simulate([l, t * 2], noisefunc)
49 |
50 | sources = np.vstack([sources1, sources2, sources3])
51 | cl = [1] * t + [2] * t + [1, 2] * t
52 |
53 | var = VAR(1)
54 | r_trial = varica.mvarica(sources, var, cl, reducedim='no_pca', varfit='trial')
55 | r_class = varica.mvarica(sources, var, cl, reducedim='no_pca', varfit='class')
56 | r_ensemble = varica.mvarica(sources, var, cl, reducedim='no_pca', varfit='ensemble')
57 |
58 | vars = [np.var(r.var_residuals) for r in [r_trial, r_class, r_ensemble]]
59 |
60 | # class one consists of trials generated with b01 and b03
61 | # class two consists of trials generated with b02 and b03
62 | #
63 | # ensemble fitting cannot resolve any model -> highest residual variance
64 |         # class fitting can only resolve (b01+b03) vs (b02+b03) -> medium residual variance
65 | # trial fitting can resolve all three models -> lowest residual variance
66 |
67 | self.assertLess(vars[0], vars[1])
68 | self.assertLess(vars[1], vars[2])
69 |
70 | def testModelIdentification(self):
71 | """ generate VAR signals, mix them, and see if MVARICA can reconstruct the signals
72 | do this for every backend """
73 |
74 | # original model coefficients
75 | b0 = np.zeros((3, 6))
76 | b0[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0],
77 | [-0.7, 0.0, 0.9, 0.0]]
78 | m0 = b0.shape[0]
79 | l, t = 1000, 100
80 |
81 | # generate VAR sources with non-gaussian innovation process, otherwise ICA won't work
82 | noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3 / 1e3
83 |
84 | var = VAR(2)
85 | var.coef = b0
86 | sources = var.simulate([l, t], noisefunc)
87 |
88 | # simulate volume conduction... 3 sources measured with 7 channels
89 | mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
90 | [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
91 | [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
92 | data = datatools.dot_special(np.transpose(mix), sources)
93 |
94 | for backend_name, backend_gen in scot.backend.items():
95 |
96 | # apply MVARICA
97 | # - default setting of 0.99 variance should reduce to 3 channels with this data
98 | # - automatically determine delta (enough data, so it should most likely be 0)
99 | result = varica.mvarica(data, var, optimize_var=True, backend=backend_gen())
100 |
101 | # ICA does not define the ordering and sign of components
102 |             # so we need to test all combinations to see whether one of them fits the original coefficients
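            # columns come in (lag-1, lag-2) pairs per source, so the
            # permutations below always move column pairs together and the
            # sign flips act on whole pairs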
103 | permutations = np.array(
104 | [[0, 1, 2, 3, 4, 5], [0, 1, 4, 5, 2, 3], [2, 3, 4, 5, 0, 1], [2, 3, 0, 1, 4, 5], [4, 5, 0, 1, 2, 3],
105 | [4, 5, 2, 3, 0, 1]])
106 | signperms = np.array(
107 | [[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, -1, -1], [1, 1, -1, -1, 1, 1], [1, 1, -1, -1, -1, -1],
108 | [-1, -1, 1, 1, 1, 1], [-1, -1, 1, 1, -1, -1], [-1, -1, -1, -1, 1, 1], [-1, -1, -1, -1, -1, -1]])
109 |
110 | best, d = np.inf, None
111 |
112 | for perm in permutations:
113 | b = result.b.coef[perm[::2] // 2, :]
114 | b = b[:, perm]
115 | for sgn in signperms:
116 | c = b * np.repeat([sgn], 3, 0) * np.repeat([sgn[::2]], 6, 0).T
117 | err = np.sum((c - b0) ** 2)
118 | if err < best:
119 | best = err
120 | d = c
121 |
122 | assert_allclose(d, b0, rtol=1e-2, atol=1e-2)
123 |
124 |
125 | class TestCSPVARICA(unittest.TestCase):
126 | def setUp(self):
127 | pass
128 |
129 | def tearDown(self):
130 | pass
131 |
132 | def testInterface(self):
133 | # self.assertRaises(TypeError, varica.cspvarica)
134 |         # simply pass in different data shapes and see if the function runs without error
135 | self.assertRaises(AttributeError, varica.cspvarica, np.sin(np.arange(30)).reshape((10, 3)), VAR(1), [0])
136 | # varica.cspvarica(np.sin(np.arange(30)).reshape((2, 3, 5)), VAR(1), ['A', 'B']) # 5 samples, 3 channels, 2 trials
137 |
138 | def testFit(self):
139 | """ Test submodel fitting on instationary data
140 | """
141 | np.random.seed(42)
142 |
143 | # original model coefficients
144 | b01 = np.array([[0.0, 0], [0, 0]])
145 | b02 = np.array([[0.5, 0.3], [0.3, 0.5]])
146 | b03 = np.array([[0.1, 0.1], [0.1, 0.1]])
147 | t, m, l = 10, 2, 100
148 |
149 | noisefunc = lambda: np.random.normal(size=(1, m)) ** 3 / 1e3
150 |
151 | var = VAR(1)
152 | var.coef = b01
153 | sources1 = var.simulate([l, t], noisefunc)
154 | var.coef = b02
155 | sources2 = var.simulate([l, t], noisefunc)
156 | var.coef = b03
157 | sources3 = var.simulate([l, t * 2], noisefunc)
158 |
159 | sources = np.vstack([sources1, sources2, sources3])
160 | cl = [1] * t + [2] * t + [1, 2] * t
161 |
162 | var = VAR(1)
163 | r_trial = varica.cspvarica(sources, var, cl, reducedim=None, varfit='trial')
164 | r_class = varica.cspvarica(sources, var, cl, reducedim=None, varfit='class')
165 | r_ensemble = varica.cspvarica(sources, var, cl, reducedim=None, varfit='ensemble')
166 |
167 | vars = [np.var(r.var_residuals) for r in [r_trial, r_class, r_ensemble]]
168 |
169 | # class one consists of trials generated with b01 and b03
170 | # class two consists of trials generated with b02 and b03
171 | #
172 | # ensemble fitting cannot resolve any model -> highest residual variance
173 |         # class fitting can only resolve (b01+b03) vs (b02+b03) -> medium residual variance
174 | # trial fitting can resolve all three models -> lowest residual variance
175 | print(vars)
176 |
177 | self.assertLess(vars[0], vars[1])
178 | self.assertLess(vars[1], vars[2])
179 |
180 |
181 |
--------------------------------------------------------------------------------
/scot/tests/test_xvschema.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | import unittest
6 |
7 | import numpy as np
8 | from numpy.testing import assert_array_equal
9 |
10 | import scot.xvschema
11 |
12 |
13 | class TestBuiltin(unittest.TestCase):
14 | def setUp(self):
15 | pass
16 |
17 | def tearDown(self):
18 | pass
19 |
20 | def test_singletrial(self):
21 | n_trials = 10
22 | xv = scot.xvschema.singletrial(n_trials)
23 | for n, (train, test) in enumerate(xv):
24 | self.assertEqual(len(train), 1)
25 | self.assertEqual(len(test), n_trials - 1)
26 |
27 | for t in train:
28 | self.assertTrue(t not in test)
29 |
30 | self.assertEqual(train[0], n)
31 |
32 | def test_multitrial(self):
33 | n_trials = 10
34 | xv = scot.xvschema.multitrial(n_trials)
35 | for n, (train, test) in enumerate(xv):
36 | self.assertEqual(len(test), 1)
37 | self.assertEqual(len(train), n_trials - 1)
38 |
39 | for t in train:
40 | self.assertTrue(t not in test)
41 |
42 | self.assertEqual(test[0], n)
43 |
44 | def test_splitset(self):
45 | n_trials = 10
46 | xv = scot.xvschema.splitset(n_trials)
47 | for n, (train, test) in enumerate(xv):
48 | self.assertEqual(len(test), n_trials // 2)
49 | self.assertEqual(len(train), n_trials // 2)
50 |
51 | for t in train:
52 | self.assertTrue(t not in test)
53 |
54 | def test_nfold(self):
55 | n_trials = 50
56 | n_blocks = 5
57 | xv = scot.xvschema.make_nfold(n_blocks)(n_trials)
58 | for n, (train, test) in enumerate(xv):
59 | self.assertEqual(len(test), n_trials // n_blocks)
60 | self.assertEqual(len(train), n_trials - n_trials // n_blocks)
61 |
62 | for t in train:
63 | self.assertTrue(t not in test)
64 | self.assertEqual(n + 1, n_blocks)
65 |
66 |
67 | class TestSklearn(unittest.TestCase):
68 | def setUp(self):
69 | try:
70 | import sklearn
71 | except ImportError:
72 | self.skipTest("could not import scikit-learn")
73 |
74 | def tearDown(self):
75 | pass
76 |
77 | def test_leave1out(self):
78 | from sklearn.model_selection import LeaveOneOut
79 | n_trials = 10
80 | xv1 = scot.xvschema.multitrial(n_trials)
81 | xv2 = LeaveOneOut().split(np.arange(n_trials))
82 | self._comparexv(xv1, xv2)
83 |
84 | def test_kfold(self):
85 | from sklearn.model_selection import KFold
86 | n_trials = 15
87 | n_blocks = 5
88 | xv1 = scot.xvschema.make_nfold(n_blocks)(n_trials)
89 | xv2 = KFold(n_splits=n_blocks, shuffle=False).split(np.arange(n_trials))
90 | self._comparexv(xv1, xv2)
91 |
92 | def test_application(self):
93 | from scot.var import VAR
94 | from sklearn.model_selection import LeaveOneOut, KFold
95 | np.random.seed(42)
96 | x = np.random.randn(10, 3, 15)
97 |
98 | var = VAR(3, xvschema=lambda n, _: LeaveOneOut().split(range(n))).optimize_delta_bisection(x)
99 | self.assertGreater(var.delta, 0)
100 | var = VAR(3, xvschema=lambda n, _: KFold(5).split(range(n))).optimize_delta_bisection(x)
101 | self.assertGreater(var.delta, 0)
102 |
103 | def _comparexv(self, xv1, xv2):
104 | for (a, b), (c, d) in zip(xv1, xv2):
105 | assert_array_equal(a, c)
106 | assert_array_equal(b, d)
107 |
--------------------------------------------------------------------------------
/scot/utils.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013-2015 SCoT Development Team
4 |
5 | """ Utility functions """
6 |
7 | from __future__ import division
8 |
9 | from functools import partial
10 |
11 | import numpy as np
12 |
13 |
14 | def check_random_state(seed):
15 | """Turn seed into a np.random.RandomState instance.
16 |
17 | If seed is None, return the RandomState singleton used by np.random.
18 | If seed is an int, return a new RandomState instance seeded with seed.
19 | If seed is already a RandomState instance, return it.
20 | Otherwise raise ValueError.
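21 |
22 |     Examples
23 |     --------
24 |     A short doctest sketch of the three accepted inputs (illustrative):
25 |
26 |     >>> rng = check_random_state(42)    # int seeds a fresh RandomState
27 |     >>> check_random_state(rng) is rng  # instances pass through unchanged
28 |     True
29 |     >>> isinstance(check_random_state(None), np.random.RandomState)
30 |     True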
21 | """
22 | if seed is None or seed is np.random:
23 | return np.random.mtrand._rand
24 | if isinstance(seed, (int, np.integer)):
25 | return np.random.RandomState(seed)
26 | if isinstance(seed, np.random.RandomState):
27 | return seed
28 | raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
29 | ' instance' % seed)
30 |
31 |
32 | def cuthill_mckee(matrix):
33 | """Implementation of the Cuthill-McKee algorithm.
34 |
35 | Permute a symmetric binary matrix into a band matrix form with a small bandwidth.
36 |
37 | Parameters
38 | ----------
39 | matrix : ndarray, dtype=bool, shape = [n, n]
40 | The matrix is internally converted to a symmetric matrix by setting each element [i,j] to True if either
41 |         [i,j] or [j,i] evaluates to True.
42 |
43 | Returns
44 | -------
45 | order : list of int
46 |         Permutation indices
47 |
48 | Examples
49 | --------
50 | >>> A = np.array([[0,0,1,1], [0,0,0,0], [1,0,1,0], [1,0,0,0]])
51 | >>> p = cuthill_mckee(A)
52 | >>> A
53 | array([[0, 0, 1, 1],
54 | [0, 0, 0, 0],
55 | [1, 0, 1, 0],
56 | [1, 0, 0, 0]])
57 | >>> A[p,:][:,p]
58 | array([[0, 0, 0, 0],
59 | [0, 0, 1, 0],
60 | [0, 1, 0, 1],
61 | [0, 0, 1, 1]])
62 | """
63 | matrix = np.atleast_2d(matrix)
64 | n, m = matrix.shape
65 |     assert n == m, "matrix must be square"
66 |
67 |     # Make sure the matrix is really symmetric. This is equivalent to
68 |     # converting a directed adjacency matrix into an undirected adjacency matrix.
69 | matrix = np.logical_or(matrix, matrix.T)
70 |
71 | degree = np.sum(matrix, 0)
72 | order = [np.argmin(degree)]
73 |
74 |     for i in range(n):
75 |         adj = np.nonzero(matrix[order[i]])[0]     # neighbors of the current node
76 |         adj = [a for a in adj if a not in order]  # ...that are not yet ordered
77 |         if not adj:
78 |             idx = [j for j in range(n) if j not in order]  # disconnected graph:
79 |             order.append(idx[np.argmin(degree[idx])])      # restart at minimal degree
80 |         else:
81 |             if len(adj) == 1:
82 |                 order.append(adj[0])
83 |             else:
84 |                 adj = np.asarray(adj)
85 |                 by_degree = adj[np.argsort(degree[adj])]  # visit by ascending degree
86 |                 order.extend(by_degree.tolist())
87 |         if len(order) == n:
88 |             break
89 |
90 | return order
91 |
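92 | # For larger graphs, a compiled alternative exists in SciPy (sketch, assuming
93 | # SciPy is installed; note it returns the *reverse* Cuthill-McKee ordering):
94 | #
95 | #     from scipy.sparse import csr_matrix
96 | #     from scipy.sparse.csgraph import reverse_cuthill_mckee
97 | #     perm = reverse_cuthill_mckee(csr_matrix(A), symmetric_mode=True)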
92 |
93 | #noinspection PyPep8Naming
94 | class memoize(object):
95 | """Cache the return value of a method.
96 |
97 | This class is meant to be used as a decorator of methods. The return value
98 | from a given method invocation will be cached on the instance whose method
99 | was invoked. All arguments passed to a method decorated with memoize must
100 | be hashable.
101 |
102 | If a memoized method is invoked directly on its class the result will not
103 | be cached. Instead the method will be invoked like a static method:
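104 |
105 |         class Obj(object):               # illustrative sketch only
106 |             @memoize
107 |             def add_to(self, arg):
108 |                 return self + arg
109 |
110 |         Obj.add_to(1)     # not enough arguments
111 |         Obj.add_to(1, 2)  # returns 3, result is not cached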
104 | """
105 |
106 | def __init__(self, func):
107 | self.func = func
108 |
109 | #noinspection PyUnusedLocal
110 | def __get__(self, obj, objtype=None):
111 | if obj is None:
112 | return self.func
113 | return partial(self, obj)
114 |
115 |     def __call__(self, *args, **kw):
116 |         obj = args[0]  # the instance whose method was invoked
117 |         try:
118 |             cache = obj.__cache
119 |         except AttributeError:
120 |             cache = obj.__cache = {}  # lazily create the per-instance cache
121 |         key = (self.func, args[1:], frozenset(kw.items()))  # arguments must be hashable
122 |         try:
123 |             res = cache[key]
124 |         except KeyError:
125 |             res = cache[key] = self.func(*args, **kw)  # compute and store on cache miss
126 |         return res
127 |
128 |
129 | def cartesian(arrays, out=None):
130 | """Generate a cartesian product of input arrays.
131 |
132 | Parameters
133 | ----------
134 | arrays : list of array-like
135 | 1-D arrays to form the cartesian product of.
136 | out : ndarray
137 | Array to place the cartesian product in.
138 |
139 | Returns
140 | -------
141 | out : ndarray
142 | 2-D array of shape (M, len(arrays)) containing cartesian products
143 | formed of input arrays.
144 |
145 | Examples
146 | --------
147 | >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
148 | array([[1, 4, 6],
149 | [1, 4, 7],
150 | [1, 5, 6],
151 | [1, 5, 7],
152 | [2, 4, 6],
153 | [2, 4, 7],
154 | [2, 5, 6],
155 | [2, 5, 7],
156 | [3, 4, 6],
157 | [3, 4, 7],
158 | [3, 5, 6],
159 | [3, 5, 7]])
160 |
161 | References
162 | ----------
163 | http://stackoverflow.com/a/1235363/3005167
164 |
165 | """
166 |
167 | arrays = [np.asarray(x) for x in arrays]
168 | dtype = arrays[0].dtype
169 |
170 |     n = np.prod([x.size for x in arrays])  # total number of combinations
171 |     if out is None:
172 |         out = np.zeros([n, len(arrays)], dtype=dtype)
173 |
174 |     m = n // arrays[0].size  # rows per distinct value of arrays[0]
175 |     out[:, 0] = np.repeat(arrays[0], m)
176 |     if arrays[1:]:
177 |         cartesian(arrays[1:], out=out[0:m, 1:])  # fill the first block recursively
178 |         for j in range(1, arrays[0].size):
179 |             out[j * m: (j + 1) * m, 1:] = out[0:m, 1:]  # copy it for the other values
180 |     return out
181 |
--------------------------------------------------------------------------------
/scot/xvschema.py:
--------------------------------------------------------------------------------
1 | # Released under The MIT License (MIT)
2 | # http://opensource.org/licenses/MIT
3 | # Copyright (c) 2013 SCoT Development Team
4 |
5 | """ Cross-validation schemas """
6 |
7 | from __future__ import division
8 |
9 | import numpy as np
10 | from numpy import sort
11 | from functools import partial
12 |
13 |
14 | def singletrial(num_trials, skipstep=1):
15 | """ Single-trial cross-validation schema
16 |
17 | Use one trial for training, all others for testing.
18 |
19 | Parameters
20 | ----------
21 | num_trials : int
22 | Total number of trials
23 | skipstep : int
24 |         only use every `skipstep`-th trial for training
25 |
26 | Returns
27 | -------
28 | gen : generator object
29 | the generator returns tuples (trainset, testset)
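30 |
31 |     Examples
32 |     --------
33 |     A small illustrative run (testsets are returned as sorted arrays):
34 |
35 |     >>> for train, test in singletrial(3):
36 |     ...     print(train, test)
37 |     [0] [1 2]
38 |     [1] [0 2]
39 |     [2] [0 1]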
30 | """
31 | for t in range(0, num_trials, skipstep):
32 | trainset = [t]
33 | testset = [i for i in range(trainset[0])] + \
34 | [i for i in range(trainset[-1] + 1, num_trials)]
35 |         testset = sort([x % num_trials for x in testset])
36 | yield trainset, testset
37 |
38 |
39 | def multitrial(num_trials, skipstep=1):
40 | """ Multi-trial cross-validation schema
41 |
42 | Use one trial for testing, all others for training.
43 |
44 | Parameters
45 | ----------
46 | num_trials : int
47 | Total number of trials
48 | skipstep : int
49 |         only use every `skipstep`-th trial for testing
50 |
51 | Returns
52 | -------
53 | gen : generator object
54 | the generator returns tuples (trainset, testset)
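55 |
56 |     Examples
57 |     --------
58 |     A small illustrative run (trainsets are returned as sorted arrays):
59 |
60 |     >>> for train, test in multitrial(3):
61 |     ...     print(train, test)
62 |     [1 2] [0]
63 |     [0 2] [1]
64 |     [0 1] [2]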
55 | """
56 | for t in range(0, num_trials, skipstep):
57 | testset = [t]
58 | trainset = [i for i in range(testset[0])] + \
59 | [i for i in range(testset[-1] + 1, num_trials)]
60 |         trainset = sort([x % num_trials for x in trainset])
61 | yield trainset, testset
62 |
63 |
64 | def splitset(num_trials, skipstep=None):
65 | """ Split-set cross validation
66 |
67 | Use half the trials for training, and the other half for testing. Then
68 | repeat the other way round.
69 |
70 | Parameters
71 | ----------
72 | num_trials : int
73 | Total number of trials
74 | skipstep : int
75 | unused
76 |
77 | Returns
78 | -------
79 | gen : generator object
80 | the generator returns tuples (trainset, testset)
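81 |
82 |     Examples
83 |     --------
84 |     Illustrative; with an odd number of trials the extra trial falls into
85 |     the second half:
86 |
87 |     >>> list(splitset(4))
88 |     [([0, 1], [2, 3]), ([2, 3], [0, 1])]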
81 | """
82 | split = num_trials // 2
83 |
84 | a = list(range(0, split))
85 | b = list(range(split, num_trials))
86 | yield a, b
87 | yield b, a
88 |
89 |
90 | def make_nfold(n):
91 | """ n-fold cross validation
92 |
93 | Use each of n blocks for testing once.
94 |
95 | Parameters
96 | ----------
97 | n : int
98 | number of blocks
99 |
100 | Returns
101 | -------
102 | gengen : func
103 | a function that returns the generator
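104 |
105 |     Examples
106 |     --------
107 |     Illustrative; 10 trials in 5 blocks gives 5 folds, each training on 8
108 |     trials and testing on a contiguous block of 2:
109 |
110 |     >>> nfold = make_nfold(5)
111 |     >>> [(len(train), len(test)) for train, test in nfold(10)]
112 |     [(8, 2), (8, 2), (8, 2), (8, 2), (8, 2)]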
104 | """
105 | return partial(_nfold, n=n)
106 |
107 |
108 | def _nfold(num_trials, skipstep=None, n='unset'):  # n is always bound by make_nfold
109 |     blocksize = int(np.ceil(num_trials / n))
110 |     for i in range(0, num_trials, blocksize):
111 |         testset = [k for k in (i + np.arange(blocksize)) if k < num_trials]
112 |         trainset = [j for j in range(testset[0])] + \
113 |                    [j for j in range(testset[-1] + 1, num_trials)]
114 |         trainset = sort([x % num_trials for x in trainset])
115 | yield trainset, testset
116 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
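2 | # universal=1 tags the built wheel as py2.py3-none-any, i.e. pure Python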
2 | universal=1
3 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from setuptools import setup
4 | from codecs import open
5 | from scot import __version__ as ver
6 |
7 |
8 | with open('README.md', encoding='utf-8') as readme:
9 | long_description = readme.read()
10 |
11 | setup(
12 | name='scot',
13 | version=ver,
14 | description='EEG/MEG Source Connectivity Toolbox',
15 | long_description=long_description,
16 | url='https://github.com/scot-dev/scot',
17 | author='SCoT Development Team',
18 | author_email='scotdev@googlegroups.com',
19 | license='MIT',
20 | classifiers=[
21 | 'Development Status :: 4 - Beta',
22 | 'Intended Audience :: Science/Research',
23 | 'Topic :: Scientific/Engineering',
24 | 'License :: OSI Approved :: MIT License',
25 | 'Programming Language :: Python :: 3',
26 | 'Programming Language :: Python :: 3.2',
27 | 'Programming Language :: Python :: 3.3',
28 | 'Programming Language :: Python :: 3.4',
29 | 'Programming Language :: Python :: 3.5'
30 | ],
31 | keywords='source connectivity EEG MEG ICA',
32 | packages=['scot', 'scot.eegtopo', 'scot.external'],
33 | package_data={'scot': ['scot.ini']}
34 | )
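35 |
36 | # Typical usage from a source checkout: `pip install .` builds and installs
37 | # the package; the version string is taken from the `from scot import
38 | # __version__` import at the top of this file.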
35 |
--------------------------------------------------------------------------------