├── pytorch_shearlets
├── __init__.py
├── shearlets.py
├── filters.py
└── utils.py
├── tests
├── barbara.jpg
└── test_shearlets.py
├── README.md
├── pyproject.toml
└── LICENSE
/pytorch_shearlets/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/barbara.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/peck94/pytorch_shearlets/HEAD/tests/barbara.jpg
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # 2D Shearlet Transforms in Pytorch
2 |
3 | This repository contains a rudimentary implementation of the shearlet transform in PyTorch. It is a modified version of the [pyShearLab](https://github.com/stefanloock/pyshearlab/tree/master) library by Stefan Loock, licensed under GPL-3.0.
4 |
5 | ## Installation
6 |
7 | You can install this package directly using pip:
8 |
9 | ```console
10 | pip install git+https://github.com/peck94/pytorch_shearlets
11 | ```
12 |
13 | Any dependencies should be taken care of automatically.
14 |
15 | ## Usage
16 |
17 | Usage examples can be found in the `tests` folder.
18 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["hatchling"]
3 | build-backend = "hatchling.build"
4 |
5 | [project]
6 | name = "pytorch-shearlets"
7 | version = "0.0.2"
8 | authors = [
9 | { name="Jonathan Peck", email="jonathan.peck@ugent.be" },
10 | ]
11 | description = "2D Shearlet Transforms in PyTorch"
12 | readme = "README.md"
13 | requires-python = ">=3.7"
14 | dependencies = [
15 | "torch",
16 | "pyshearlab@git+https://github.com/stefanloock/pyshearlab"
17 | ]
18 | classifiers = [
19 | "Programming Language :: Python :: 3",
20 | "License :: OSI Approved :: MIT License",
21 | "Operating System :: OS Independent",
22 | ]
23 |
24 | [project.urls]
25 | "Homepage" = "https://github.com/peck94/pytorch_shearlets"
26 | "Bug Tracker" = "https://github.com/peck94/pytorch_shearlets/issues"
27 |
28 | [tool.hatch.metadata]
29 | allow-direct-references = true
30 |
--------------------------------------------------------------------------------
/tests/test_shearlets.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | import matplotlib.pyplot as plt
4 |
5 | import numpy as np
6 |
7 | import torch
8 |
9 | from PIL import Image
10 |
11 | from pytorch_shearlets.shearlets import ShearletSystem
12 | from pytorch_shearlets.utils import SLcomputePSNR
13 |
@pytest.fixture(scope='module')
def device():
    """Torch device shared by the whole module: CUDA when available, else CPU."""
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
17 |
@pytest.fixture(scope='module')
def shearletSystem(device):
    """Build one 512x512, 2-scale shearlet system for all tests in the module."""
    system = ShearletSystem(512, 512, 2, device=device)
    return system
21 |
def test_inverse(shearletSystem, device):
    """Check that reconstruct(decompose(x)) recovers x up to numerical error."""

    # random test image of the size the system was built for
    sample = torch.randn(1, 1, 512, 512).to(device)

    # forward transform
    coefficients = shearletSystem.decompose(sample)

    # inverse transform must preserve the shape
    reconstruction = shearletSystem.reconstruct(coefficients)
    assert reconstruction.shape == sample.shape

    # relative reconstruction error must be tiny
    residual = torch.linalg.norm(sample - reconstruction)
    assert residual < 1e-5 * torch.linalg.norm(sample)
36 |
def test_call(shearletSystem, device):
    """Denoise a noisy Barbara image by hard-thresholding shearlet coefficients."""

    def psnr(reference, estimate):
        # peak value of 255 assumes 8-bit image data
        err = np.mean((reference.cpu().numpy() - estimate.cpu().numpy()) ** 2)
        return 20 * np.log10(255 / np.sqrt(err))

    # experiment parameters
    sigma = 25
    thresholdingFactor = 3

    # load the 512x512 test image as a [1, 1, H, W] tensor
    pixels = np.array(Image.open('tests/barbara.jpg')).reshape(1, 1, 512, 512)
    img = torch.from_numpy(pixels).to(device)

    # compute mean PSNR over repeated noise realizations
    psnrs = []
    for _ in range(10):
        noisy = img + sigma * torch.randn(512, 512).to(device)

        # decompose, hard-threshold small coefficients, reconstruct
        coeffs = shearletSystem.decompose(noisy)
        weights = shearletSystem.RMS * torch.ones_like(coeffs)
        keep = torch.abs(coeffs) / (thresholdingFactor * weights * sigma) >= 1
        x_hat = shearletSystem.reconstruct(torch.where(keep, coeffs, torch.zeros_like(coeffs)))

        # denoising must improve on the noisy input every round
        psnr1 = psnr(img, noisy)
        psnr2 = psnr(img, x_hat)
        psnrs.append(psnr2)

        assert psnr1 < psnr2, f'PSNR of noisy image ({psnr1:.2f}) must be lower than PSNR of restored image ({psnr2:.2f})'

    # aggregate result
    mean_psnr = np.mean(psnrs)
    std_psnr = np.std(psnrs)
    print(f'PSNR: {mean_psnr:.2f} dB ({std_psnr:.2f})')

    assert mean_psnr > 25, f'PSNR is too low: {mean_psnr:.2f} < 25 dB'
80 |
--------------------------------------------------------------------------------
/pytorch_shearlets/shearlets.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.fft import fft2, ifft2, fftshift, ifftshift
3 |
4 | import numpy as np
5 |
6 | from .filters import dfilters, modulate2, MakeONFilter
7 | from .utils import SLprepareFilters2D, SLgetShearletIdxs2D, SLgetShearlets2D, upAndMergeBands, subsampleBands
8 |
class ShearletSystem:
    """
    Compute a 2D shearlet system.

    Wraps the pyShearLab filter construction and exposes PyTorch-based
    decomposition and reconstruction operating on [N, C, H, W] tensors.
    """
    def __init__(self, height, width, scales, fname='dmaxflat4', qmtype=None, device=torch.device('cpu')):
        """
        Construct the shearlet system filters.

        :param height: Image height in pixels.
        :param width: Image width in pixels.
        :param scales: Number of scales.
        :param fname: Directional filter name passed to ``dfilters``.
        :param qmtype: Optional quadrature mirror filter type; 'meyer24' and
            'meyer32' select Meyer filters, any other value (including None)
            falls back to the default 9-tap filter below.
        :param device: Torch device on which the filters are stored.
        """
        # number of shearing levels per scale: ceil(j / 2) for scale j
        levels = np.ceil(np.arange(1, scales + 1)/2).astype(int)

        h0, h1 = dfilters(fname, 'd')
        h0 /= np.sqrt(2)
        h1 /= np.sqrt(2)

        directionalFilter = modulate2(h0, 'c')

        # Select the quadrature mirror filter. BUGFIX: the original code left
        # quadratureMirrorFilter unbound (NameError) when qmtype was a string
        # other than 'meyer24'/'meyer32'; now every unrecognized value falls
        # back to the default filter, and the recognized paths are unchanged.
        if qmtype is not None and qmtype.lower() == "meyer24":
            quadratureMirrorFilter = MakeONFilter("Meyer",24)
        elif qmtype is not None and qmtype.lower() == "meyer32":
            quadratureMirrorFilter = MakeONFilter("Meyer",32)
        else:
            quadratureMirrorFilter = np.array([0.0104933261758410, -0.0263483047033631, -0.0517766952966370,
                                    0.276348304703363, 0.582566738241592, 0.276348304703363,
                                    -0.0517766952966369, -0.0263483047033631, 0.0104933261758408])

        self.preparedFilters = SLprepareFilters2D(height, width, scales, levels, directionalFilter, quadratureMirrorFilter)
        self.shearletIdxs = SLgetShearletIdxs2D(levels, 0)
        self.shearlets, self.RMS, self.dualFrameWeights = SLgetShearlets2D(self.preparedFilters, self.shearletIdxs)
        self.device = device

        # reshape to [1, 1, ..., M] so the filters broadcast against the
        # [N, C, H, W, 1] frequency-domain input in decompose()
        self.shearlets = torch.from_numpy(self.shearlets.reshape(1, 1, *self.shearlets.shape)).to(device)
        self.dualFrameWeights = torch.from_numpy(self.dualFrameWeights).to(device)
        self.RMS = torch.from_numpy(self.RMS).to(device)

    def decompose(self, x):
        """
        Shearlet decomposition of 2D data.

        :param x: Input images. Tensor of shape [N, C, H, W].
        :return: Shearlet coefficients. Tensor of shape [N, C, H, W, M].
        """
        # get data in frequency domain; trailing unsqueeze makes the input
        # broadcast against the M stacked shearlet filters
        x_freq = torch.unsqueeze(fftshift(fft2(ifftshift(x.to(self.device)))), -1)

        # compute shearlet coefficients at each scale
        # NOTE(review): the explicit shifts over dims [0, 1, 2, 3] also touch
        # the batch/channel axes; for N == C == 1 those are no-ops — confirm
        # intent before using batched/multi-channel input.
        coeffs = fftshift(
            ifft2(
                ifftshift(
                    x_freq * torch.conj(self.shearlets), dim=[0, 1, 2, 3]),
                dim=[-3, -2]),
            dim=[0, 1, 2, 3])

        # return real coefficients
        return torch.real(coeffs)

    def reconstruct(self, coeffs):
        """
        2D reconstruction of shearlet coefficients.

        :param coeffs: Shearlet coefficients. Tensor of shape [N, C, H, W, M].
        :return: Reconstructed images. Tensor of shape [N, C, H, W].
        """
        # compute image values: filter each band in the frequency domain,
        # sum over the band axis, and normalize by the dual frame weights
        s = fftshift(
            fft2(
                ifftshift(coeffs.to(self.device), dim=[0, 1, 2, 3]),
                dim=[-3, -2]),
            dim=[0, 1, 2, 3]) * self.shearlets
        x = fftshift(ifft2(ifftshift((torch.div(torch.sum(s, dim=-1), self.dualFrameWeights)))))

        # return real values
        return torch.real(x)

    def decompose_subsample(self, x, decimFactors):
        """
        2D Shearlet decomposition followed by subsampling

        Input
        -----
        x : Input images, tensor of shape [N, C, H, W]
        decimFactors : reverse(decimFactors)[i] gives the decimation factor required for scale i

        Output
        -----
        dictionary R where R[d] gives the tensor [N, C, Hd, Wd, Md] of bands that are decimated by factor d
        """
        # full decomposition first, then group/decimate bands by scale index
        decomposed = self.decompose(x)
        bandsDict = subsampleBands(decomposed,self.shearletIdxs[:,1],decimFactors)

        return bandsDict


    def upsample_reconstruct(self, x, decimFactors):
        """
        2D reconstruction of downsampled shearlet bands

        Input
        -----
        x : dictionary of bands where x[d] gives the tensor [N, C, Hd, Wd, Md] of the Md bands decimated by factor d

        decimFactors : reverse(decimFactors)[i] gives the decimation factor required for scale i

        Output
        -----
        Reconstructed images. Tensor of shape [N, C, H, W]

        Warning
        -----
        Might need to clamp to correct range after reconstruction

        """
        # upsample and merge the per-factor band groups back into a single
        # [N, C, H, W, M] stack, then invert the transform
        merged_bands = upAndMergeBands(x,self.shearletIdxs[:,1],decimFactors)
        reconstructed = self.reconstruct(merged_bands)

        return reconstructed
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | {one line to give the program's name and a brief idea of what it does.}
635 | Copyright (C) {year} {name of author}
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | {project} Copyright (C) {year} {fullname}
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/philosophy/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/pytorch_shearlets/filters.py:
--------------------------------------------------------------------------------
1 | """
2 | This module contains all necessary files to compute the filters used
3 | in the pyShearLab2D toolbox. Most of these files are taken from different
4 | MATLAB toolboxes and were translated to Python. Credit is given in each
5 | individual function.
6 |
7 |
8 | Stefan Loock, February 2, 2017 [sloock@gwdg.de]
9 | """
10 |
11 | from __future__ import division
12 | import numpy as np
13 | from scipy import signal as signal
14 |
15 |
16 | try:
17 | import pyfftw
18 | fftlib = pyfftw.interfaces.numpy_fft
19 | pyfftw.interfaces.cache.enable()
20 | except ImportError:
21 | fftlib = np.fft
22 |
23 |
def MakeONFilter(Type, Par=1):
    """
    Generate an orthonormal QMF filter for the wavelet transform.

    This is a rewrite of the original Matlab implementation of MakeONFilter.m
    from the WaveLab850 toolbox.

    Usage:

        qmf = MakeONFilter(Type, Par)

    Inputs:

        Type: string: 'Meyer', 'Haar', 'Beylkin', 'Coiflet', 'Daubechies',
              'Symmlet', 'Vaidyanathan', 'Battle'
        Par:  family-specific parameter (see below); ignored by filters
              that have a single member ('Haar', 'Beylkin', 'Vaidyanathan').

    Outputs:

        qmf: quadrature mirror filter, normalized to unit Euclidean norm.

    Raises:

        NotImplementedError: for 'Meyer' with Par other than 24 or 32.
        ValueError: for an unknown Type, or an unsupported Par for the
              'Coiflet', 'Daubechies', 'Symmlet' or 'Battle' families.

    Description

    Meyer filter; Par can be 24 or 32.

    The Haar filter (which could be considered a Daubechies-2) was the
    first wavelet, though not called as such, and is discontinuous.

    The Beylkin filter places roots for the frequency response function
    close to the Nyquist frequency on the real axis.

    The Coiflet filters are designed to give both the mother and father
    wavelets 2*Par vanishing moments; here Par may be one of 1,2,3,4 or 5.

    The Daubechies filters are minimal phase filters that generate wavelets
    which have a minimal support for a given number of vanishing moments.
    They are indexed by their length, Par, which may be one of
    4,6,8,10,12,14,16,18 or 20. The number of vanishing moments is Par/2.

    Symmlets are also wavelets within a minimum size support for a given
    number of vanishing moments, but they are as symmetrical as possible,
    as opposed to the Daubechies filters which are highly asymmetrical.
    They are indexed by Par, which specifies the number of vanishing
    moments and is equal to half the size of the support. It ranges
    from 4 to 10.

    The Vaidyanathan filter gives an exact reconstruction, but does not
    satisfy any moment condition. The filter has been optimized for
    speech coding.

    The Battle-Lemarie filter generate spline orthogonal wavelet basis.
    The parameter Par gives the degree of the spline. The number of
    vanishing moments is Par+1. Supported Par values are 1, 3 and 5.

    See Also: FWT_PO, IWT_PO, FWT2_PO, IWT2_PO, WPAnalysis

    References: The books by Daubechies and Wickerhauser.

    Part of WaveLab850 (http://www-stat.stanford.edu/~wavelab/)
    """
    if Type == 'Meyer':
        # Full 102-tap Meyer filter; truncated below to the requested length.
        onFilter = np.array([0, -0.000001509740857, 0.000001278766757, 0.000000449585560, -0.000002096568870, 0.000001723223554, 0.000000698082276, -0.000002879408033, 0.000002383148395, 0.000000982515602, -0.000004217789186, 0.000003353501538,
                            0.000001674721859, -0.000006034501342, 0.000004837555802, 0.000002402288023, -0.000009556309846, 0.000007216527695, 0.000004849078300, -0.000014206928581, 0.000010503914271, 0.000006187580298, -0.000024438005846, 0.000020106387691,
                            0.000014993523600, -0.000046428764284, 0.000032341311914, 0.000037409665760, -0.000102779005085, 0.000024461956845, 0.000149713515389, -0.000075592870255, -0.000139913148217, -0.000093512893880, 0.000161189819725, 0.000859500213762,
                            -0.000578185795273, -0.002702168733939, 0.002194775336459, 0.006045510596456, -0.006386728618548, -0.011044641900539, 0.015250913158586, 0.017403888210177, -0.032094063354505, -0.024321783959519, 0.063667300884468, 0.030621243943425,
                            -0.132696615358862, -0.035048287390595, 0.444095030766529, 0.743751004903787, 0.444095030766529, -0.035048287390595, -0.132696615358862, 0.030621243943425, 0.063667300884468, -0.024321783959519, -0.032094063354505, 0.017403888210177,
                            0.015250913158586, -0.011044641900539, -0.006386728618548, 0.006045510596456, 0.002194775336459, -0.002702168733939, -0.000578185795273, 0.000859500213762, 0.000161189819725, -0.000093512893880, -0.000139913148217, -0.000075592870255,
                            0.000149713515389, 0.000024461956845, -0.000102779005085, 0.000037409665760, 0.000032341311914, -0.000046428764284, 0.000014993523600, 0.000020106387691, -0.000024438005846, 0.000006187580298, 0.000010503914271, -0.000014206928581,
                            0.000004849078300, 0.000007216527695, -0.000009556309846, 0.000002402288023, 0.000004837555802, -0.000006034501342, 0.000001674721859, 0.000003353501538, -0.000004217789186, 0.000000982515602, 0.000002383148395, -0.000002879408033,
                            0.000000698082276, 0.000001723223554, -0.000002096568870, 0.000000449585560, 0.000001278766757, -0.000001509740857])
        if Par == 32:
            onFilter = onFilter[35:102-35]  # central 32 taps
        elif Par == 24:
            onFilter = onFilter[39:102-39]  # central 24 taps
        else:
            raise NotImplementedError(f"Par must be 24 or 32, not {Par}")
    elif Type == 'Haar':
        onFilter = np.array([1/np.sqrt(2), 1/np.sqrt(2)])
    elif Type == 'Beylkin':
        onFilter = np.array([.099305765374, .424215360813, .699825214057,
                            .449718251149, -.110927598348, -.264497231446,
                            .026900308804, .155538731877, -.017520746267,
                            -.088543630623, .019679866044, .042916387274,
                            -.017460408696, -.014365807969, .010040411845,
                            .001484234782, -.002736031626, .000640485329])
    elif Type == 'Coiflet':
        if Par == 1:
            onFilter = np.array([.038580777748, -.126969125396, -.077161555496,
                                .607491641386, .745687558934, .226584265197])
        elif Par == 2:
            onFilter = np.array([.016387336463, -.041464936782, -.067372554722,
                                .386110066823, .812723635450, .417005184424,
                                -.076488599078, -.059434418646, .023680171947,
                                .005611434819, -.001823208871, -.000720549445])
        elif Par == 3:
            onFilter = np.array([-.003793512864, .007782596426, .023452696142,
                                -.065771911281, -.061123390003, .405176902410,
                                .793777222626, .428483476378, -.071799821619,
                                -.082301927106, .034555027573, .015880544864,
                                -.009007976137, -.002574517688, .001117518771,
                                .000466216960, -.000070983303, -.000034599773])
        elif Par == 4:
            onFilter = np.array([.000892313668, -.001629492013, -.007346166328,
                                .016068943964, .026682300156, -.081266699680,
                                -.056077313316, .415308407030, .782238930920,
                                .434386056491, -.066627474263, -.096220442034,
                                .039334427123, .025082261845, -.015211731527,
                                -.005658286686, .003751436157, .001266561929,
                                -.000589020757, -.000259974552, .000062339034,
                                .000031229876, -.000003259680, -.000001784985])
        elif Par == 5:
            onFilter = np.array([-.000212080863, .000358589677, .002178236305,
                                -.004159358782, -.010131117538, .023408156762,
                                .028168029062, -.091920010549, -.052043163216,
                                .421566206729, .774289603740, .437991626228,
                                -.062035963906, -.105574208706, .041289208741,
                                .032683574283, -.019761779012, -.009164231153,
                                .006764185419, .002433373209, -.001662863769,
                                -.000638131296, .000302259520, .000140541149,
                                -.000041340484, -.000021315014, .000003734597,
                                .000002063806, -.000000167408, -.000000095158])
        else:
            raise ValueError(f"Coiflet Par must be 1, 2, 3, 4 or 5, not {Par}")
    elif Type == 'Daubechies':
        if Par == 4:
            onFilter = np.array([.482962913145, .836516303738, .224143868042,
                                -.129409522551])
        elif Par == 6:
            onFilter = np.array([.332670552950, .806891509311, .459877502118,
                                -.135011020010, -.085441273882, .035226291882])
        elif Par == 8:
            onFilter = np.array([.230377813309, .714846570553, .630880767930,
                                -.027983769417, -.187034811719, .030841381836,
                                .032883011667, -.010597401785])
        elif Par == 10:
            onFilter = np.array([.160102397974, .603829269797, .724308528438,
                                .138428145901, -.242294887066, -.032244869585,
                                .077571493840, -.006241490213, -.012580751999,
                                .003335725285])
        elif Par == 12:
            onFilter = np.array([.111540743350, .494623890398, .751133908021,
                                .315250351709, -.226264693965, -.129766867567,
                                .097501605587, .027522865530, -.031582039317,
                                .000553842201, .004777257511, -.001077301085])
        elif Par == 14:
            onFilter = np.array([.077852054085, .396539319482, .729132090846,
                                .469782287405, -.143906003929, -.224036184994,
                                .071309219267, .080612609151, -.038029936935,
                                -.016574541631, .012550998556, .000429577973,
                                -.001801640704, .000353713800])
        elif Par == 16:
            onFilter = np.array([.054415842243, .312871590914, .675630736297,
                                .585354683654, -.015829105256, -.284015542962,
                                .000472484574, .128747426620, -.017369301002,
                                -.044088253931, .013981027917, .008746094047,
                                -.004870352993, -.000391740373, .000675449406,
                                -.000117476784])
        elif Par == 18:
            onFilter = np.array([.038077947364, .243834674613, .604823123690,
                                .657288078051, .133197385825, -.293273783279,
                                -.096840783223, .148540749338, .030725681479,
                                -.067632829061, .000250947115, .022361662124,
                                -.004723204758, -.004281503682, .001847646883,
                                .000230385764, -.000251963189, .000039347320])
        elif Par == 20:
            onFilter = np.array([.026670057901, .188176800078, .527201188932,
                                .688459039454, .281172343661, -.249846424327,
                                -.195946274377, .127369340336, .093057364604,
                                -.071394147166, -.029457536822, .033212674059,
                                .003606553567, -.010733175483, .001395351747,
                                .001992405295, -.000685856695, -.000116466855,
                                .000093588670, -.000013264203])
        else:
            raise ValueError(f"Daubechies Par must be an even number in 4..20, not {Par}")
    elif Type == 'Symmlet':
        if Par == 4:
            onFilter = np.array([-.107148901418, -.041910965125, .703739068656,
                                1.136658243408, .421234534204, -.140317624179,
                                -.017824701442, .045570345896])
        elif Par == 5:
            onFilter = np.array([.038654795955, .041746864422, -.055344186117,
                                .281990696854, 1.023052966894, .896581648380,
                                .023478923136, -.247951362613, -.029842499869,
                                .027632152958])
        elif Par == 6:
            onFilter = np.array([.021784700327, .004936612372, -.166863215412,
                                -.068323121587, .694457972958, 1.113892783926,
                                .477904371333, -.102724969862, -.029783751299,
                                .063250562660, .002499922093, -.011031867509])
        elif Par == 7:
            onFilter = np.array([.003792658534, -.001481225915, -.017870431651,
                                .043155452582, .096014767936, -.070078291222,
                                .024665659489, .758162601964, 1.085782709814,
                                .408183939725, -.198056706807, -.152463871896,
                                .005671342686, .014521394762])
        elif Par == 8:
            onFilter = np.array([.002672793393, -.000428394300, -.021145686528,
                                .005386388754, .069490465911, -.038493521263,
                                -.073462508761, .515398670374, 1.099106630537,
                                .680745347190, -.086653615406, -.202648655286,
                                .010758611751, .044823623042, -.000766690896,
                                -.004783458512])
        elif Par == 9:
            onFilter = np.array([.001512487309, -.000669141509, -.014515578553,
                                .012528896242, .087791251554, -.025786445930,
                                -.270893783503, .049882830959, .873048407349,
                                1.015259790832, .337658923602, -.077172161097,
                                .000825140929, .042744433602, -.016303351226,
                                -.018769396836, .000876502539, .001981193736])
        elif Par == 10:
            onFilter = np.array([.001089170447, .000135245020, -.012220642630,
                                -.002072363923, .064950924579, .016418869426,
                                -.225558972234, -.100240215031, .667071338154,
                                1.088251530500, .542813011213, -.050256540092,
                                -.045240772218, .070703567550, .008152816799,
                                -.028786231926, -.001137535314, .006495728375,
                                .000080661204, -.000649589896])
        else:
            raise ValueError(f"Symmlet Par must be in 4..10, not {Par}")
    elif Type == 'Vaidyanathan':
        onFilter = np.array([-.000062906118, .000343631905, -.000453956620,
                            -.000944897136, .002843834547, .000708137504,
                            -.008839103409, .003153847056, .019687215010,
                            -.014853448005, -.035470398607, .038742619293,
                            .055892523691, -.077709750902, -.083928884366,
                            .131971661417, .135084227129, -.194450471766,
                            -.263494802488, .201612161775, .635601059872,
                            .572797793211, .250184129505, .045799334111])
    elif Type == 'Battle':
        # Only one half of the (symmetric) filter is tabulated; it is
        # mirrored below to build the full symmetric filter.
        if Par == 1:
            onFilterTmp = np.array([0.578163, 0.280931, -0.0488618, -0.0367309,
                                    0.012003, 0.00706442, -0.00274588,
                                    -0.00155701, 0.000652922, 0.000361781,
                                    -0.000158601, -0.0000867523])
        elif Par == 3:
            onFilterTmp = np.array([0.541736, 0.30683, -0.035498, -0.0778079,
                                    0.0226846, 0.0297468, -0.0121455,
                                    -0.0127154, 0.00614143, 0.00579932,
                                    -0.00307863, -0.00274529, 0.00154624,
                                    0.00133086, -0.000780468, -0.00065562,
                                    0.000395946, 0.000326749, -0.000201818,
                                    -0.000164264, 0.000103307])
        elif Par == 5:
            onFilterTmp = np.array([0.528374, 0.312869, -0.0261771, -0.0914068,
                                    0.0208414, 0.0433544, -0.0148537, -0.0229951,
                                    0.00990635, 0.0128754, -0.00639886, -0.00746848,
                                    0.00407882, 0.00444002, -0.00258816, -0.00268646,
                                    0.00164132, 0.00164659, -0.00104207, -0.00101912,
                                    0.000662836, 0.000635563, -0.000422485, -0.000398759,
                                    0.000269842, 0.000251419, -0.000172685, -0.000159168,
                                    0.000110709, 0.000101113])
        else:
            raise ValueError(f"Battle Par must be 1, 3 or 5, not {Par}")
        # Mirror the tabulated half around its first entry to obtain the
        # full symmetric filter of length 2*size - 1.
        onFilter = np.zeros(2*onFilterTmp.size-1)
        onFilter[onFilterTmp.size-1:2*onFilterTmp.size] = onFilterTmp
        onFilter[0:onFilterTmp.size-1] = onFilterTmp[onFilterTmp.size-1:0:-1]
    else:
        # Previously an unknown Type fell through every branch and crashed
        # with an opaque NameError at the return; fail loudly instead.
        raise ValueError(f"Unknown filter type {Type!r}")
    return onFilter / np.linalg.norm(onFilter)

"""
Copyright (c) 1993-5. Jonathan Buckheit and David Donoho

Part of Wavelab Version 850
Built Tue Jan 3 13:20:40 EST 2006
This is Copyrighted Material
For Copying permissions see COPYING.m
Comments? e-mail wavelab@stat.stanford.edu
"""
272 |
273 | """
274 | Copyright (c) 1993-5. Jonathan Buckheit and David Donoho
275 |
276 | Part of Wavelab Version 850
277 | Built Tue Jan 3 13:20:40 EST 2006
278 | This is Copyrighted Material
279 | For Copying permissions see COPYING.m
280 | Comments? e-mail wavelab@stat.stanford.edu
281 | """
282 |
283 |
284 | def dfilters(fname, type):
285 | """
286 | This is a translation of the original Matlab implementation of dfilters.m
287 | from the Nonsubsampled Contourlet Toolbox. The following comment is from
288 | the original and only applies in so far that not all of the directional
289 | filters are implemented in this Python version but only those which are
290 | needed for the shearlet toolbox.
291 |
292 | DFILTERS Generate directional 2D filters
293 |
294 | [h0, h1] = dfilters(fname, type)
295 |
296 | Input:
297 |
298 | fname: Filter name. Available 'fname' are:
299 | 'haar': the "Haar" filters
300 | 'vk': McClellan transformed of the filter
301 | from the VK book
302 | 'ko': orthogonal filter in the Kovacevics
303 | paper
304 | 'kos': smooth 'ko' filter
305 | 'lax': 17 x 17 by Lu, Antoniou and Xu
306 | 'sk': 9 x 9 by Shah and Kalker
307 | 'cd': 7 and 9 McClellan transformed by
308 | Cohen and Daubechies
309 | 'pkva': ladder filters by Phong et al.
310 | 'oqf_362': regular 3 x 6 filter
311 | 'dvmlp': regular linear phase biorthogonal filter
312 | with 3 dvm
313 | 'sinc': ideal filter (*NO perfect recontruction*)
314 | 'dmaxflat': diamond maxflat filters obtained from a three
315 | stage ladder
316 |
317 | type: 'd' or 'r' for decomposition or reconstruction filters
318 |
319 | Output:
320 | h0, h1: diamond filter pair (lowpass and highpass)
321 |
322 | To test those filters (for the PR condition for the FIR case), verify that:
323 | conv2(h0, modulate2(h1, 'b')) + conv2(modulate2(h0, 'b'), h1) = 2
324 | (replace + with - for even size filters)
325 |
326 | To test for orthogonal filter
327 | conv2(h, reverse2(h)) + modulate2(conv2(h, reverse2(h)), 'b') = 2
328 |
329 | Part of the Nonsubsampled Contourlet Toolbox
330 | (http://www.mathworks.de/matlabcentral/fileexchange/10049-nonsubsampled-contourlet-toolbox)
331 | """
332 | if fname == 'haar':
333 | if type.lower() == 'd':
334 | h0 = np.array([1, 1]) / np.sqrt(2)
335 | h1 = np.array([-1, 1]) / np.sqrt(2)
336 | else:
337 | h0 = np.array([1, 1]) / np.sqrt(2)
338 | h1 = np.array([1, -1]) / np.sqrt(2)
339 | elif fname == 'vk': # in Vetterli and Kovacevic book
340 | if type.lower() == 'd':
341 | h0 = np.array([1, 2, 1]) / 4
342 | h1 = np.array([-1, -2, 6, -2, -1]) / 4
343 | else:
344 | h0 = np.array([-1, 2, 6, 2, -1]) / 4
345 | h1 = np.array([-1, 2, -1]) / 4
346 | t = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]) / 4 # diamon kernel
347 | h0 = mctrans(h0, t)
348 | h1 = mctrans(h1, t)
349 | elif fname == 'ko': # orthogonal filters in Kovacevics thesis
350 | a0 = 2
351 | a1 = 0.5
352 | a2 = 1
353 | h0 = np.array([[0, -a1, -a0*a1, 0],
354 | [-a2, -a0*a2, -a0, 1],
355 | [0, a0*a1*a2, -a1*a2, 0]])
356 | # h1 = qmf2(h0)
357 | h1 = np.array([[0, -a1*a2, -a0*a1*a2, 0],
358 | [1, a0, -a0*a2, a2],
359 | [0, -a0*a1, a1, 0]])
360 | # normalize filter sum and norm
361 | norm = np.sqrt(2) / np.sum(h0)
362 | h0 = h0 * norm
363 | h1 = h1 * norm
364 |
365 | if type == 'r':
366 | # reverse filters for reconstruction
367 | h0 = h0[::-1]
368 | h1 = h1[::-1]
369 | elif fname == 'kos': # smooth orthogonal filters in Kovacevics thesis
370 | a0 = -np.sqrt(3)
371 | a1 = -np.sqrt(3)
372 | a2 = 2+np.sqrt(3)
373 |
374 | h0 = np.array([[0, -a1, -a0*a1, 0],
375 | [-a2, -a0*a2, -a0, 1],
376 | [0, a0*a1*a2, -a1*a2, 0]])
377 | # h1 = qmf2(h0)
378 | h1 = np.array([[0, -a1*a2, -a0*a1*a2, 0],
379 | [1, a0, -a0*a2, a2],
380 | [0, -a0*a1, a1, 0]])
381 | # normalize filter sum and norm
382 | norm = np.sqrt(2) / np.sum(h0)
383 | h0 = h0 * norm
384 | h1 = h1 * norm
385 |
386 | if type == 'r':
387 | # reverse filters for reconstruction
388 | h0 = h0[::-1]
389 | h1 = h1[::-1]
390 | elif fname == 'lax': # by lu, antoniou and xu
391 | h = np.array([[-1.2972901e-5, 1.2316237e-4, -7.5212207e-5, 6.3686104e-5,
392 | 9.4800610e-5, -7.5862919e-5, 2.9586164e-4, -1.8430337e-4],
393 | [1.2355540e-4, -1.2780882e-4, -1.9663685e-5, -4.5956538e-5,
394 | -6.5195193e-4, -2.4722942e-4, -2.1538331e-5, -7.0882131e-4],
395 | [-7.5319075e-5, -1.9350810e-5, -7.1947086e-4, 1.2295412e-3,
396 | 5.7411214e-4, 4.4705422e-4, 1.9623554e-3, 3.3596717e-4],
397 | [6.3400249e-5, -2.4947178e-4, 4.4905711e-4, -4.1053629e-3,
398 | -2.8588307e-3, 4.3782726e-3, -3.1690509e-3, -3.4371484e-3],
399 | [9.6404973e-5, -4.6116254e-5, 1.2371871e-3, -1.1675575e-2,
400 | 1.6173911e-2, -4.1197559e-3, 4.4911165e-3, 1.1635130e-2],
401 | [-7.6955555e-5, -6.5618379e-4, 5.7752252e-4, 1.6211426e-2,
402 | 2.1310378e-2, -2.8712621e-3, -4.8422645e-2, -5.9246338e-3],
403 | [2.9802986e-4, -2.1365364e-5, 1.9701350e-3, 4.5047673e-3,
404 | -4.8489158e-2, -3.1809526e-3, -2.9406153e-2, 1.8993868e-1],
405 | [-1.8556637e-4, -7.1279432e-4, 3.3839195e-4, 1.1662001e-2,
406 | -5.9398223e-3, -3.4467920e-3, 1.9006499e-1, 5.7235228e-1]
407 | ])
408 | h0 = np.sqrt(2) * np.append(h, h[:,-2::-1], 1)
409 | h0 = np.append(h0, h0[-2::-1,:], 0)
410 | h1 = modulate2(h0, 'b')
411 | elif fname == 'sk': # by shah and kalker
412 | h = np.array([[0.621729, 0.161889, -0.0126949, -0.00542504, 0.00124838],
413 | [0.161889, -0.0353769, -0.0162751, -0.00499353, 0],
414 | [-0.0126949, -0.0162751, 0.00749029, 0, 0],
415 | [-0.00542504, 0.00499353, 0, 0, 0],
416 | [0.00124838, 0, 0, 0, 0]])
417 | h0 = np.append(h[-1:0:-1, -1:0:-1], h[-1:0:-1,:], 1)
418 | h0 = np.append(h0, np.append(h[:,-1:0:-1], h, 1), 0)*np.sqrt(2)
419 | h1 = modulate2(h0, 'b')
420 | elif fname == 'dvmlp':
421 | q = np.sqrt(2)
422 | b = 0.02
423 | b1 = b*b;
424 | h = np.array([[b/q, 0, -2*q*b, 0, 3*q*b, 0, -2*q*b, 0, b/q],
425 | [0, -1/(16*q), 0, 9/(16*q), 1/q, 9/(16*q), 0, -1/(16*q), 0],
426 | [b/q, 0, -2*q*b, 0, 3*q*b, 0, -2*q*b, 0, b/q]])
427 | g0 = np.array([[-b1/q, 0, 4*b1*q, 0, -14*q*b1, 0, 28*q*b1, 0,
428 | -35*q*b1, 0, 28*q*b1, 0, -14*q*b1, 0, 4*b1*q, 0, -b1/q],
429 | [0, b/(8*q), 0, -13*b/(8*q), b/q, 33*b/(8*q), -2*q*b,
430 | -21*b/(8*q), 3*q*b, -21*b/(8*q), -2*q*b, 33*b/(8*q),
431 | b/q, -13*b/(8*q), 0, b/(8*q), 0],
432 | [-q*b1, 0, -1/(256*q) + 8*q*b1, 0, 9/(128*q) - 28*q*b1,
433 | -1/(q*16), -63/(256*q) + 56*q*b1, 9/(16*q),
434 | 87/(64*q)-70*q*b1, 9/(16*q), -63/(256*q) + 56*q*b1,
435 | -1/(q*16), 9/(128*q) - 28*q*b1, 0, -1/(256*q) + 8*q*b1, 0,
436 | -q*b1],
437 | [0, b/(8*q), 0, -13*b/(8*q), b/q, 33*b/(8*q), -2*q*b,
438 | -21*b/(8*q), 3*q*b, -21*b/(8*q), -2*q*b, 33*b/(8*q), b/q,
439 | -13*b/(8*q), 0, b/(8*q), 0],
440 | [-b1/q, 0, 4*b1*q, 0, -14*q*b1, 0, 28*q*b1, 0, -35*q*b1, 0,
441 | 28*q*b1, 0, -14*q*b1, 0, 4*b1*q, 0, -b1/q]])
442 | h1 = modulate2(g0, 'b')
443 | h0 = h
444 | print(h1.shape)
445 | print(h0.shape)
446 | if type == 'r':
447 | h1 = modulate2(h, 'b')
448 | h0 = g0
449 | elif fname == 'cd' or fname == '7-9': # by cohen and Daubechies
450 | h0 = np.array([0.026748757411, -0.016864118443, -0.078223266529,
451 | 0.266864118443, 0.602949018236, 0.266864118443,
452 | -0.078223266529, -0.016864118443, 0.026748757411])
453 | g0 = np.array([-0.045635881557, -0.028771763114, 0.295635881557,
454 | 0.557543526229, 0.295635881557, -0.028771763114,
455 | -0.045635881557])
456 | if type == 'd':
457 | h1 = modulate2(g0, 'c')
458 | else:
459 | h1 = modulate2(h0, 'c')
460 | h0 = g0
461 | # use McClellan to obtain 2D filters
462 | t = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])/4 # diamond kernel
463 | h0 = np.sqrt(2) * mctrans(h0, t)
464 | h1 = np.sqrt(2) * mctrans(h1, t)
465 | elif fname == 'oqf_362':
466 | h0 = np.sqrt(2) / 64 * np.array([[np.sqrt(15), -3, 0],
467 | [0, 5, np.sqrt(15)], [-2*np.sqrt(2), 30, 0],
468 | [0, 30, 2*np.sqrt(15)], [np.sqrt(15), 5, 0],
469 | [0, -3, -np.sqrt(15)]])
470 | h1 = -modulate2(h0, 'b')
471 | h1 = -h1[::-1]
472 | if type == 'r':
473 | h0 = h0[::-1]
474 | h1 = -modulate2(h0, 'b')
475 | h1 = -h1[::-1]
476 | elif fname == 'test':
477 | h0 = np.array([[0, 1, 0], [1, 4, 1], [0, 1, 0]])
478 | h1 = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])
479 | elif fname == 'testDVM':
480 | h0 = np.array([[1, 1], [1, 1]]) / np.sqrt(2)
481 | h1 = np.array([[-1, 1], [1, -1]]) / np.sqrt(2)
482 | elif fname == 'qmf': # by Lu, antoniou and xu
483 | # ideal response window
484 | m = 2
485 | n = 2
486 | w1d = np.kaiser(4*m+1, 2.6)
487 | w = np.zeros((n+m+1,n+m+1))
488 | for n1 in np.arange(-m,m+1):
489 | for n2 in np.arange(-n,n+1):
490 | w[n1+m,n2+n] = w1d[2*m+n1+n2]*w1d[2*m+n1-n2]
491 | h = np.zeros((n+m+1,n+m+1))
492 | for n1 in np.arange(-m,m+1):
493 | for n2 in np.arange(-n,n+1):
494 | h[n1+m, n2+n] = 0.5*np.sinc((n1+n2)/2) * 0.5*np.sinc((n1-n2)/2)
495 | c = np.sum(h)
496 | h = np.sqrt(2) * h
497 | h0 = h * w
498 | h1 = modulate2(h0, 'b')
499 | elif fname == 'qmf2': # by Lu, Antoniou and Xu
500 | # ideal response window
501 | h = np.array([
502 | [-0.001104, 0.002494, -0.001744, 0.004895, -0.000048, -0.000311],
503 | [0.008918, -0.002844, -0.025197, -0.017135, 0.003905, -0.000081],
504 | [-0.007587, -0.065904, 00.100431, -0.055878, 0.007023, 0.001504],
505 | [0.001725, 0.184162, 0.632115, 0.099414, -0.027006, -0.001110],
506 | [-0.017935, -0.000491, 0.191397, -0.001787, -0.010587, 0.002060],
507 | [0.001353, 0.005635, -0.001231, -0.009052, -0.002668, 0.000596]])
508 | h0 = h/np.sum(h)
509 | h1 = modulate2(h0, 'b')
510 | elif fname == 'dmaxflat4':
511 | M1 = 1/np.sqrt(2)
512 | M2 = np.copy(M1)
513 | k1 = 1-np.sqrt(2)
514 | k3 = np.copy(k1)
515 | k2 = np.copy(M1)
516 | h = np.array([0.25*k2*k3, 0.5*k2, 1+0.5*k2*k3])*M1
517 | h = np.append(h, h[-2::-1])
518 | g = np.array([-0.125*k1*k2*k3, 0.25*k1*k2,
519 | -0.5*k1-0.5*k3-0.375*k1*k2*k3, 1+0.5*k1*k2])*M2
520 | g = np.append(g, h[-2::-1])
521 | B = dmaxflat(4,0)
522 | h0 = mctrans(h,B)
523 | g0 = mctrans(g,B)
524 | h0 = np.sqrt(2) * h0 / np.sum(h0)
525 | g0 = np.sqrt(2) * g0 / np.sum(g0)
526 |
527 | h1 = modulate2(g0, 'b')
528 | if type == 'r':
529 | h1 = modulate2(h0, 'b')
530 | h0 = g0
531 | elif fname == 'dmaxflat5':
532 | M1 = 1/np.sqrt(2)
533 | M2 = M1
534 | k1 = 1-np.sqrt(2)
535 | k3 = k1
536 | k2 = M1
537 | h = np.array([0.25*k2*k3, 0.5*k2, 1+0.5*k2*k3])*M1
538 | h = np.append(h, h[-2::-1])
539 | g = np.array([-0.125*k1*k2*k3, 0.25*k1*k2,
540 | -0.5*k1-0.5*k3-0.375*k1*k2*k3, 1+0.5*k1*k2])*M2
541 | g = np.append(g, h[-2::-1])
542 | B = dmaxflat(5,0)
543 | h0 = mctrans(h,B)
544 | g0 = mctrans(g,B)
545 | h0 = np.sqrt(2) * h0 / np.sum(h0)
546 | g0 = np.sqrt(2) * g0 / np.sum(g0)
547 |
548 | h1 = modulate2(g0, 'b')
549 | if type == 'r':
550 | h1 = modulate2(h0, 'b')
551 | h0 = g0
552 | elif fname == 'dmaxflat6':
553 | M1 = 1/np.sqrt(2)
554 | M2 = M1
555 | k1 = 1-np.sqrt(2)
556 | k3 = k1
557 | k2 = M1
558 | h = np.array([0.25*k2*k3, 0.5*k2, 1+0.5*k2*k3])*M1
559 | h = np.append(h, h[-2::-1])
560 | g = np.array([-0.125*k1*k2*k3, 0.25*k1*k2,
561 | -0.5*k1-0.5*k3-0.375*k1*k2*k3, 1+0.5*k1*k2])*M2
562 | g = np.append(g, h[-2::-1])
563 | B = dmaxflat(6,0)
564 | h0 = mctrans(h,B)
565 | g0 = mctrans(g,B)
566 | h0 = np.sqrt(2) * h0 / np.sum(h0)
567 | g0 = np.sqrt(2) * g0 / np.sum(g0)
568 |
569 | h1 = modulate2(g0, 'b')
570 | if type == 'r':
571 | h1 = modulate2(h0, 'b')
572 | h0 = g0
573 | elif fname == 'dmaxflat7':
574 | M1 = 1/np.sqrt(2)
575 | M2 = M1
576 | k1 = 1-np.sqrt(2)
577 | k3 = k1
578 | k2 = M1
579 | h = np.array([0.25*k2*k3, 0.5*k2, 1+0.5*k2*k3])*M1
580 | h = np.append(h, h[-2::-1])
581 | g = np.array([-0.125*k1*k2*k3, 0.25*k1*k2,
582 | -0.5*k1-0.5*k3-0.375*k1*k2*k3, 1+0.5*k1*k2])*M2
583 | g = np.append(g, h[-2::-1])
584 | B = dmaxflat(7,0)
585 | h0 = mctrans(h,B)
586 | g0 = mctrans(g,B)
587 | h0 = np.sqrt(2) * h0 / np.sum(h0)
588 | g0 = np.sqrt(2) * g0 / np.sum(g0)
589 |
590 | h1 = modulate2(g0, 'b')
591 | if type == 'r':
592 | h1 = modulate2(h0, 'b')
593 | h0 = g0
594 | # The original file supports a case "otherwise" for unrecognized filters
595 | # and computes simple 1D wavelet filters for them using wfilters.m
596 | # I think we don't need this and skip this for the time being.
597 | # IN ORIGINAL MATLAB VERSION:
598 | # otherwise
599 | # % Assume the "degenerated" case: 1D wavelet filters
600 | # [h0,h1] = wfilters(fname, type);
601 | return h0, h1
602 |
603 |
def dmaxflat(N,d):
    """
    THIS IS A REWRITE OF THE ORIGINAL MATLAB IMPLEMENTATION OF dmaxflat.m
    FROM THE Nonsubsampled Contourlet Toolbox. -- Stefan Loock, Dec 2016.

    Returns the 2-D diamond maxflat filter of order N. The filters are
    nonseparable and d is the (0,0) coefficient (the center of the filter),
    being 1 or 0 depending on use.

    Parameters
    ----------
    N : int
        Filter order; must be in {1, 2, ..., 7}.
    d : float
        Value placed at the center coefficient h[N, N].

    Returns
    -------
    h : ndarray
        A (2N+1) x (2N+1) symmetric filter whose coefficients sum to 1 + d.

    Raises
    ------
    ValueError
        If N is not in {1, 2, ..., 7}.

    by Arthur L. da Cunha, University of Illinois Urbana-Champaign
    Aug 2004
    """
    if (N > 7) or (N < 1):
        # Fail loudly: the original returned the unusable sentinel 0, which
        # only crashed later inside mctrans with a confusing error.
        raise ValueError('N must be in {1,2,...,7}')
    if N == 1:
        h = np.array([[0, 1, 0],[1, 0, 1],[0, 1, 0]])/4
        h[1,1] = d
    elif N == 2:
        # Upper-left quadrant; mirrored below to build the full symmetric filter.
        h = np.array([[0, -1, 0],[-1, 0, 10], [0, 10, 0]])
        h = np.append(h, np.fliplr(h[:,0:-1]), 1)
        h = np.append(h, np.flipud(h[0:-1,:]), 0)/32
        h[2,2] = d
    elif N == 3:
        # The /2^9 normalization was missing in the Python port (MATLAB
        # divides by 512); without it the coefficients sum to 512, not 1.
        h = np.array([[0, 3, 0, 2],[3, 0, -27, 0],[0, -27, 0, 174],
                      [2, 0, 174, 0]])/np.power(2,9)
        h = np.append(h, np.fliplr(h[:, 0:-1]), 1)
        h = np.append(h, np.flipud(h[0:-1,:]),0)
        h[3,3] = d
    elif N == 4:
        h = np.array([[0, -5, 0, -3, 0], [-5, 0, 52, 0, 34],
                      [0, 52, 0, -276, 0], [-3, 0, -276, 0, 1454],
                      [0, 34, 0, 1454, 0]])/np.power(2,12)
        h = np.append(h, np.fliplr(h[:,0:-1]),1)
        h = np.append(h, np.flipud(h[0:-1,:]),0)
        h[4,4] = d
    elif N == 5:
        h = np.array([[0, 35, 0, 20, 0, 18], [35, 0, -425, 0, -250, 0],
                      [0, -425, 0, 2500, 0, 1610], [20, 0, 2500, 0, -10200, 0],
                      [0, -250, 0, -10200, 0, 47780],
                      [18, 0, 1610, 0, 47780, 0]])/np.power(2,17)
        h = np.append(h, np.fliplr(h[:,0:-1]),1)
        h = np.append(h, np.flipud(h[0:-1,:]),0)
        h[5,5] = d
    elif N == 6:
        # Last row: 44 -> 444, fixing a transcription typo. The quadrant must
        # be symmetric (cf. h[1, 6] == 444) and the coefficients must sum to
        # 2^20 before normalization; both checks require 444.
        h = np.array([[0, -63, 0, -35, 0, -30, 0],
                      [-63, 0, 882, 0, 495, 0, 444],
                      [0, 882, 0, -5910, 0, -3420, 0],
                      [-35, 0, -5910, 0, 25875, 0, 16460],
                      [0, 495, 0, 25875, 0, -89730, 0],
                      [-30, 0, -3420, 0, -89730, 0, 389112],
                      [0, 444, 0, 16460, 0, 389112, 0]])/np.power(2,20)
        h = np.append(h, np.fliplr(h[:,0:-1]),1)
        h = np.append(h, np.flipud(h[0:-1,:]),0)
        h[6,6] = d
    elif N == 7:
        # Row 7: -1534709 -> -1535709, fixing a transcription typo. Symmetry
        # (cf. h[5, 6] == -1535709) and the sum-to-2^24 normalization check
        # both require -1535709.
        h = np.array([[0, 231, 0, 126, 0, 105, 0, 100],
                      [231, 0, -3675, 0, -2009, 0, -1715, 0],
                      [0, -3675, 0, 27930, 0, 15435, 0, 13804],
                      [126, 0, 27930, 0, -136514, 0, -77910, 0],
                      [0, -2009, 0, -136514, 0, 495145, 0, 311780],
                      [105, 0, 15435, 0, 495145, 0, -1535709, 0],
                      [0, -1715, 0, -77910, 0, -1535709, 0, 6305740],
                      [100, 0, 13804, 0, 311780, 0, 6305740, 0]])/np.power(2,24)
        h = np.append(h, np.fliplr(h[:,0:-1]),1)
        h = np.append(h, np.flipud(h[0:-1,:]),0)
        h[7,7] = d
    return h
671 |
672 |
def mctrans(b,t):
    """
    This is a translation of the original Matlab implementation of mctrans.m
    from the Nonsubsampled Contourlet Toolbox by Arthur L. da Cunha.

    MCTRANS McClellan transformation

    H = mctrans(B,T)

    produces the 2-D FIR filter H that corresponds to the 1-D FIR filter B
    using the transform T.

    B is assumed to be a zero-phase filter of odd length; T is the 2-D
    transform kernel (e.g. the diamond kernel). The 1-D filter is expanded in
    cosines, cos(w*n) is replaced by the Chebyshev polynomial T_n evaluated at
    the transform, and the polynomials are evaluated by repeated 2-D
    convolution with T.

    Part of the Nonsubsampled Contourlet Toolbox
    (http://www.mathworks.de/matlabcentral/fileexchange/10049-nonsubsampled-contourlet-toolbox)
    """

    # Convert the 1-D filter b to SUM_n a(n) cos(wn) form
    # if mod(n,2) != 0 -> error
    # n is the half-length of b; b is assumed to have odd length 2n+1.
    n = (b.size-1)//2

    # Recenter b so that b[0] is the zero-lag coefficient (MATLAB's
    # fftshift(b(end:-1:1)) followed by a flip, i.e. an inverse fftshift).
    b = fftlib.fftshift(b[::-1]) #inverse fftshift
    b = b[::-1]
    # a[k] are the Chebyshev-expansion coefficients: a0 + 2*sum_k ak*cos(kw).
    a = np.zeros(n+1)
    a[0] = b[0]
    a[1:n+1] = 2*b[1:n+1]

    # Half-size of the transform kernel; each convolution with t grows the
    # result by this much on every side, so it is used to track the center.
    inset = np.floor((np.asarray(t.shape)-1)/2)
    inset = inset.astype(int)
    # Use Chebyshev polynomials to compute h
    # Recurrence: P_k = 2*t*P_{k-1} - P_{k-2}, with P_0 = 1, P_1 = t.
    P0 = 1
    P1 = t;
    h = a[1]*P1;
    rows = int(inset[0]+1)
    cols = int(inset[1]+1)
    # Add the constant term a[0]*P0 at the center sample (1-based indexing
    # carried over from MATLAB, hence the -1 offsets below).
    h[rows-1,cols-1] = h[rows-1,cols-1]+a[0]*P0;
    for i in range(3,n+2):
        # One more convolution with t: the array grows by `inset` per side.
        P2 = 2*signal.convolve2d(t, P1)
        rows = (rows + inset[0]).astype(int)
        cols = (cols + inset[1]).astype(int)
        if i == 3:
            # P0 is still the scalar 1: subtract it at the center only.
            P2[rows-1,cols-1] = P2[rows-1,cols-1] - P0
        else:
            # P0 is now an array: subtract it over the matching sub-block.
            P2[rows[0]-1:rows[-1],cols[0]-1:cols[-1]] = P2[rows[0]-1:rows[-1],
                                                cols[0]-1:cols[-1]] - P0
        # Index range of the previous accumulator h within the bigger P2 grid.
        rows = inset[0] + np.arange(np.asarray(P1.shape)[0])+1
        rows = rows.astype(int)
        cols = inset[1] + np.arange(np.asarray(P1.shape)[1])+1
        cols = cols.astype(int)
        hh = h;
        # Accumulate a[i-1]*P2 and embed the (smaller) previous h centered in it.
        h = a[i-1]*P2
        h[rows[0]-1:rows[-1], cols[0]-1:cols[-1]] = h[rows[0]-1:rows[-1],
                                                cols[0]-1:cols[-1]] + hh
        P0 = P1;
        P1 = P2;
    # Rotate by 180 degrees to match MATLAB's output orientation.
    h = np.rot90(h,2)
    return h
732 |
733 |
def modulate2(x, type, center=None):
    """
    THIS IS A REWRITE OF THE ORIGINAL MATLAB IMPLEMENTATION OF
    modulate2.m FROM THE Nonsubsampled Contourlet Toolbox.

    MODULATE2 2D modulation

    y = modulate2(x, type, [center])

    Multiplies x entrywise by (-1)^n along the rows ('r'), the columns ('c')
    or both directions ('b'), where the index n is taken relative to an
    origin at floor(size(x)/2) + 1 + center.

    Parameters
    ----------
    x : ndarray
        1D or 2D input array.
    type : str
        'r', 'c' or 'b' to modulate along the rows, the columns, or both.
    center : array_like, optional
        Offset of the modulation origin (default [0, 0]).

    Returns
    -------
    y : ndarray
        Modulated array with the same shape as x.

    Raises
    ------
    ValueError
        If type is not one of 'r', 'c', 'b'.

    Part of the Nonsubsampled Contourlet Toolbox
    (http://www.mathworks.de/matlabcentral/fileexchange/10049-nonsubsampled-contourlet-toolbox)
    """
    # Avoid a mutable (ndarray) default argument: None stands in for [0, 0].
    if center is None:
        center = np.array([0, 0])
    size = np.asarray(x.shape)
    if x.ndim == 1:
        # For 1D inputs the default 2-element offset collapses to a scalar.
        if np.array_equal(center, [0, 0]):
            center = 0
    origin = np.floor(size/2)+1+center
    n1 = np.arange(size[0])-origin[0]+1
    if x.ndim == 2:
        n2 = np.arange(size[1])-origin[1]+1
    else:
        n2 = n1
    if type == 'r':
        m1 = np.power(-1,n1)
        if x.ndim == 1:
            y = x*m1
        else:
            # Broadcast the row modulation down the columns.
            y = x * np.transpose(np.tile(m1, (size[1], 1)))
    elif type == 'c':
        m2 = np.power(-1,n2)
        if x.ndim == 1:
            y = x*m2
        else:
            y = x * np.tile(m2, np.array([size[0], 1]))
    elif type == 'b':
        m1 = np.power(-1,n1)
        m2 = np.power(-1,n2)
        m = np.outer(m1, m2)
        if x.ndim == 1:
            # For 1D input, 'b' reduces to row modulation (MATLAB parity).
            y = x * m1
        else:
            y = x * m
    else:
        # Previously an invalid type fell through to a confusing NameError.
        raise ValueError("type must be 'r', 'c' or 'b', got %r" % (type,))
    return y
783 |
def MirrorFilt(x):
    """
    Translation of MirrorFilt.m from the WaveLab850 toolbox.

    MirrorFilt -- Apply (-1)^t modulation

    Usage

        h = MirrorFilt(l)

    Flips the sign of every second sample of the 1-d input signal, which
    shifts its DC frequency content to the Nyquist frequency:

        h(t) = (-1)^(t-1) * x(t),  1 <= t <= length(x)

    (in MATLAB's 1-based indexing; the first sample keeps its sign).

    Inputs

        l   1-d signal

    Outputs

        h   1-d signal with DC frequency content shifted
            to Nyquist frequency

    See Also: DyadDownHi

    Part of WaveLab850 (http://www-stat.stanford.edu/~wavelab/)
    """
    # Alternating +1/-1 pattern of the same length as x.
    signs = np.resize([1, -1], x.size)
    return signs * x
812 |
813 | """
814 | Copyright (c) 1993. Iain M. Johnstone
815 |
816 | Part of Wavelab Version 850
817 | Built Tue Jan 3 13:20:40 EST 2006
818 | This is Copyrighted Material
819 | For Copying permissions see COPYING.m
820 | Comments? e-mail wavelab@stat.stanford.edu
821 | """
822 |
--------------------------------------------------------------------------------
/pytorch_shearlets/utils.py:
--------------------------------------------------------------------------------
1 | """
This module contains all utility functions provided by the ShearLab3D toolbox
3 | from MATLAB such as padding arrays, the discretized shear operator et cetera.
4 |
5 | All these functions were originally written by Rafael Reisenhofer and are
6 | published in the ShearLab3Dv11 toolbox on http://www.shearlet.org.
7 |
8 | Stefan Loock, February 2, 2017 [sloock@gwdg.de]
9 | """
10 |
11 | from __future__ import division
12 | import sys
13 | import torch
14 | import numpy as np
15 | import scipy as scipy
16 | import scipy.io as sio
17 | from pyshearlab.pySLFilters import *
18 |
19 |
def SLcheckFilterSizes(rows,cols, shearLevels,directionalFilter,scalingFilter,
                       waveletFilter,scalingFilter2):
    """
    Checks filter sizes for different configurations for a given size of a
    square image with rows and cols given by the first two arguments. The
    argument shearLevels is a vector containing the desired shear levels for
    the shearlet transform.

    Eight filter configurations are tried in order, starting with the
    caller-supplied filters (configuration 1) and falling back to
    progressively smaller precomputed filter choices. The first
    configuration whose derived filter lengths fit into a rows x cols image
    is returned; if none fits, the process exits via sys.exit. A warning is
    printed when a fallback configuration (k > 0) had to be chosen.

    Returns
    -------
    (directionalFilter, scalingFilter, waveletFilter, scalingFilter2)
        The filters of the first admissible configuration.
    """
    # No-op self-assignments kept from the original MATLAB port.
    directionalFilter = directionalFilter
    scalingFilter = scalingFilter
    waveletFilter = waveletFilter
    scalingFilter2 = scalingFilter2

    filterSetup = [None] * 8

    # configuration 1: exactly the filters passed in by the caller.
    filterSetup[0] = {"directionalFilter": directionalFilter,
                      "scalingFilter": scalingFilter,
                      "waveletFilter": waveletFilter,
                      "scalingFilter2": scalingFilter2}
    # configuration 2: dmaxflat4 directional filter + 9-tap scaling filter.
    h0, h1 = dfilters('dmaxflat4', 'd')
    h0 /= np.sqrt(2)
    h1 /= np.sqrt(2)
    directionalFilter = modulate2(h0, 'c')
    scalingFilter = np.array([0.0104933261758410,-0.0263483047033631,
        -0.0517766952966370, 0.276348304703363, 0.582566738241592,
        0.276348304703363,-0.0517766952966369,-0.0263483047033631,
        0.0104933261758408])
    waveletFilter = MirrorFilt(scalingFilter)
    scalingFilter2 = scalingFilter
    filterSetup[1] = {"directionalFilter": directionalFilter,
                      "scalingFilter": scalingFilter,
                      "waveletFilter": waveletFilter,
                      "scalingFilter2": scalingFilter2}
    # configuration 3: smaller 'cd' (Cohen-Daubechies) directional filter.
    h0, h1 = dfilters('cd', 'd')
    h0 /= np.sqrt(2)
    h1 /= np.sqrt(2)
    directionalFilter = modulate2(h0, 'c')
    scalingFilter = np.array([0.0104933261758410, -0.0263483047033631,
        -0.0517766952966370, 0.276348304703363, 0.582566738241592,
        0.276348304703363, -0.0517766952966369,-0.0263483047033631,
        0.0104933261758408])
    waveletFilter = MirrorFilt(scalingFilter)
    scalingFilter2 = scalingFilter
    filterSetup[2] = {"directionalFilter": directionalFilter,
                      "scalingFilter": scalingFilter,
                      "waveletFilter": waveletFilter,
                      "scalingFilter2": scalingFilter2}
    # configuration 4 - somehow the same as 3, i don't know why?!
    # (kept identical to the MATLAB original)
    h0, h1 = dfilters('cd', 'd')
    h0 /= np.sqrt(2)
    h1 /= np.sqrt(2)
    directionalFilter = modulate2(h0, 'c')
    scalingFilter = np.array([0.0104933261758410, -0.0263483047033631,
        -0.0517766952966370, 0.276348304703363, 0.582566738241592,
        0.276348304703363, -0.0517766952966369,-0.0263483047033631,
        0.0104933261758408])
    waveletFilter = MirrorFilt(scalingFilter)
    scalingFilter2 = scalingFilter
    filterSetup[3] = {"directionalFilter": directionalFilter,
                      "scalingFilter": scalingFilter,
                      "waveletFilter": waveletFilter,
                      "scalingFilter2": scalingFilter2}
    # configuration 5: 'cd' directional filter + short Coiflet scaling filter.
    h0, h1 = dfilters('cd', 'd')
    h0 /= np.sqrt(2)
    h1 /= np.sqrt(2)
    directionalFilter = modulate2(h0, 'c')
    scalingFilter = MakeONFilter('Coiflet', 1)
    waveletFilter = MirrorFilt(scalingFilter)
    scalingFilter2 = scalingFilter
    filterSetup[4] = {"directionalFilter": directionalFilter,
                      "scalingFilter": scalingFilter,
                      "waveletFilter": waveletFilter,
                      "scalingFilter2": scalingFilter2}
    # configuration 6: 'cd' directional filter + Daubechies-4 scaling filter.
    h0, h1 = dfilters('cd', 'd')
    h0 /= np.sqrt(2)
    h1 /= np.sqrt(2)
    directionalFilter = modulate2(h0, 'c')
    scalingFilter = MakeONFilter('Daubechies', 4)
    waveletFilter = MirrorFilt(scalingFilter)
    scalingFilter2 = scalingFilter
    filterSetup[5] = {"directionalFilter": directionalFilter,
                      "scalingFilter": scalingFilter,
                      "waveletFilter": waveletFilter,
                      "scalingFilter2": scalingFilter2}
    # configuration 7: very small 'oqf_362' directional filter.
    h0, h1 = dfilters('oqf_362', 'd')
    h0 /= np.sqrt(2)
    h1 /= np.sqrt(2)
    directionalFilter = modulate2(h0, 'c')
    scalingFilter = MakeONFilter('Daubechies', 4)
    waveletFilter = MirrorFilt(scalingFilter)
    scalingFilter2 = scalingFilter
    filterSetup[6] = {"directionalFilter": directionalFilter,
                      "scalingFilter": scalingFilter,
                      "waveletFilter": waveletFilter,
                      "scalingFilter2": scalingFilter2}
    # configuration 8: smallest option, Haar scaling filter.
    # NOTE(review): unlike configurations 2-7, this one does not recompute
    # waveletFilter = MirrorFilt(scalingFilter); it silently reuses the
    # wavelet filter left over from configuration 7 (mirrored Daubechies-4).
    # Possibly a bug inherited from the port -- confirm against the MATLAB
    # ShearLab3D original before changing.
    h0, h1 = dfilters('oqf_362', 'd')
    h0 /= np.sqrt(2)
    h1 /= np.sqrt(2)
    directionalFilter = modulate2(h0, 'c')
    scalingFilter = MakeONFilter('Haar')
    scalingFilter2 = scalingFilter
    filterSetup[7] = {"directionalFilter": directionalFilter,
                      "scalingFilter": scalingFilter,
                      "waveletFilter": waveletFilter,
                      "scalingFilter2": scalingFilter2}
    # success mixes bool/int (False, then 1) -- kept as in the original;
    # the comparisons below rely on False == 0.
    success = False
    for k in range(8):
        #check 1
        # lcheck1 models the growth of the 1-D lowpass/wavelet filter length
        # across the scales (presumably len(conv(scaling, upsampled prev)) --
        # TODO confirm against SLgetWedgeBandpassAndLowpassFilters2D).
        lwfilter = filterSetup[k]["waveletFilter"].size
        lsfilter = filterSetup[k]["scalingFilter"].size
        lcheck1 = lwfilter
        for j in range(shearLevels.size):
            lcheck1 = lsfilter + 2*lcheck1 -2
        if lcheck1 > cols or lcheck1 > rows:
            continue
        #check 2
        # lcheck2 models the size of the directional filter after upsampling
        # by 2^(maxShearLevel+1) and convolution with the scaled lowpass.
        rowsdirfilter = np.asarray(filterSetup[k]["directionalFilter"].shape)[0]
        colsdirfilter = np.asarray(filterSetup[k]["directionalFilter"].shape)[1]
        lcheck2 = (rowsdirfilter-1)*np.power(2, max(shearLevels)+1)+1

        lsfilter2 = filterSetup[k]["scalingFilter2"].size
        lcheck2help = lsfilter2
        for j in range(1, int(max(shearLevels))+1):
            lcheck2help = lsfilter2 + 2*lcheck2help -2
        lcheck2 = lcheck2help + lcheck2-1
        if lcheck2 > cols or lcheck2 > rows or colsdirfilter > cols or colsdirfilter > rows:
            continue
        success = 1
        break
    # After the loop, k is either the first admissible configuration (break)
    # or 7 (no configuration fit); the filters below are from filterSetup[k].
    directionalFilter = filterSetup[k]["directionalFilter"]
    scalingFilter = filterSetup[k]["scalingFilter"]
    waveletFilter = filterSetup[k]["waveletFilter"]
    scalingFilter2 = filterSetup[k]["scalingFilter2"]
    if success == 0:
        sys.exit("The specified Shearlet system is not available for data of size "
            + str(rows) + "x" + str(cols) + ". Try decreasing the number of scales and shearings.")
    # Both branches below return the same tuple; the first one additionally
    # warns that a fallback configuration was selected.
    if success == 1 and k>0:
        print("Warning: The specified Shearlet system was not available for data of size " + str(rows) + "x" + str(cols) + ". Filters were automatically set to configuration " + str(k) + "(see SLcheckFilterSizes).")
        return directionalFilter, scalingFilter, waveletFilter, scalingFilter2
    else:
        return directionalFilter, scalingFilter, waveletFilter, scalingFilter2
168 |
169 |
170 |
def SLcomputePSNR(X, Xnoisy):
    """
    SLcomputePSNR Compute peak signal to noise ratio (PSNR).

    Usage:

        PSNR = SLcomputePSNR(X, Xnoisy)

    Input:

        X:      2D or 3D signal.
        Xnoisy: 2D or 3D noisy signal.

    Output:

        PSNR: The peak signal to noise ratio (in dB), computed with an
              assumed peak value of 255. Returns np.inf for identical
              inputs.
    """
    # Root-mean-square error between the two signals.
    rmse = np.linalg.norm(X - Xnoisy) / np.sqrt(X.size)
    if rmse == 0:
        return np.inf
    return 20 * np.log10(255 / rmse)
194 |
def SLcomputeSNR(X, Xnoisy):
    """
    SLcomputeSNR Compute signal to noise ratio (SNR).

    Usage:

        SNR = SLcomputeSNR(X, Xnoisy)

    Input:

        X:      2D or 3D signal.
        Xnoisy: 2D or 3D noisy signal.

    Output:

        SNR: The signal to noise ratio (in dB). Returns np.inf when the
             two signals are identical.
    """
    # Compute the noise energy once instead of via norm() and a second
    # squared sum; norm(X-Xnoisy) == 0 is equivalent to sum((X-Xnoisy)^2) == 0.
    noise = X - Xnoisy
    noiseEnergy = np.sum(np.power(noise, 2))
    if noiseEnergy == 0:
        # np.inf, not np.Inf: the capitalized alias was removed in NumPy 2.0
        # (and the sibling SLcomputePSNR already uses np.inf).
        return np.inf
    else:
        return 10 * np.log10(np.sum(np.power(X, 2)) / noiseEnergy)
217 |
218 |
219 |
def SLdshear(inputArray, k, axis):
    """
    Discretized shearing operator.

    Cyclically shifts every 1-D slice of a 2D inputArray by an amount that
    grows linearly with its position, producing a sheared copy. The shear
    number is k; axis uses MATLAB-style indexing (1 shifts along the rows,
    i.e. column by column; any other value shifts along the columns, row by
    row), matching the original ShearLab code. For k == 0 the input array is
    returned unchanged (no copy).
    """
    if k == 0:
        return inputArray

    rows, cols = inputArray.shape[0], inputArray.shape[1]
    sheared = np.zeros((rows, cols), dtype=inputArray.dtype)

    if axis == 1:
        # Shift each column vertically.
        for col in range(cols):
            shift = int(k * np.floor(cols / 2 - col))
            sheared[:, col] = np.roll(inputArray[:, col], shift)
    else:
        # Shift each row horizontally.
        for row in range(rows):
            shift = int(k * np.floor(rows / 2 - row))
            sheared[row, :] = np.roll(inputArray[row, :], shift)
    return sheared
243 |
244 |
def SLgetShearletIdxs2D(shearLevels, full=0, *args):
    """
    Compute the index set describing a 2D shearlet system.

    Usage:

        shearletIdxs = SLgetShearletIdxs2D(shearLevels)
        shearletIdxs = SLgetShearletIdxs2D(shearLevels, full)
        shearletIdxs = SLgetShearletIdxs2D(shearLevels, full, 'NameRestriction1', ValueRestriction1,...)

    Input:

        shearLevels: 1D array with the level of shearing on each scale
                     (each entry >= 0). A shear level of K means the
                     generating shearlet is sheared 2^K times in each
                     direction for each cone.
        full:        If 1, indexes for the full shearlet system are
                     produced; otherwise (default 0) shearlets lying on
                     the border of the second cone are omitted, as they
                     differ only slightly from those on the border of the
                     first cone.
        args:        Optional name/value restriction pairs with names
                     'cones', 'scales' or 'shearings'. Passing any
                     restriction also suppresses the implicit lowpass
                     index (it is still emitted when 0 is contained in
                     the 'scales' or 'cones' restriction).

    Output:

        shearletIdxs: Nx3 array; each row is one shearlet in the format
                      [cone scale shearing]. The lowpass shearlet is
                      indexed by [0 0 0].

    See also: SLgetShearletSystem2D, SLgetSubsystem2D
    """
    # Promote a scalar shear level to a one-element array.
    if not hasattr(shearLevels, "__len__"):
        shearLevels = np.array([shearLevels])

    allScales = np.asarray(range(1, len(shearLevels) + 1))
    maxLevel = np.max(shearLevels)

    # Default restrictions: everything is admissible.
    scales = allScales
    shearings = np.asarray(range(-np.power(2, maxLevel), np.power(2, maxLevel) + 1))
    cones = np.array([1, 2])
    includeLowpass = 1

    # Any name/value restriction pair disables the implicit lowpass index.
    for j in range(0, len(args), 2):
        includeLowpass = 0
        key = args[j]
        if key == "scales":
            scales = args[j + 1]
        elif key == "shearings":
            shearings = args[j + 1]
        elif key == "cones":
            cones = args[j + 1]

    shearletIdxs = []
    for cone in np.intersect1d(np.array([1, 2]), cones):
        for scale in np.intersect1d(allScales, scales):
            level = shearLevels[scale - 1]
            candidates = np.asarray(range(-np.power(2, level), np.power(2, level) + 1))
            for shearing in np.intersect1d(candidates, shearings):
                # Border shearlets of the second cone are only kept for a
                # full system.
                if (full == 1) or (cone == 1) or (np.abs(shearing) < np.power(2, level)):
                    shearletIdxs.append(np.array([cone, scale, shearing]))

    if includeLowpass or 0 in scales or 0 in cones:
        shearletIdxs.append(np.array([0, 0, 0]))
    return np.asarray(shearletIdxs)
335 |
336 |
337 |
338 | def SLgetShearlets2D(preparedFilters, shearletIdxs=None):
339 | """
340 | Compute 2D shearlets in the frequency domain.
341 |
342 | Usage:
343 |
344 | [shearlets, RMS, dualFrameWeights]
345 | = SLgetShearlets2D(preparedFilters)
346 | [shearlets, RMS, dualFrameWeights]
347 | = SLgetShearlets2D(preparedFilters, shearletIdxs)
348 |
349 | Input:
350 |
351 | preparedFilters: A structure containing filters that can be
352 | used to compute 2D shearlets. Such filters
353 | can be generated with SLprepareFilters2D.
354 |
355 | shearletdIdxs: A Nx3 array, specifying each shearlet that
356 | is to be computed in the format
357 | [cone scale shearing] where N denotes the
358 | number of shearlets. The vertical cone in
359 | time domain is indexed by 1 while the
360 | horizontal cone is indexed by 2.
361 | Note that the values for scale and shearing
362 | are limited by the precomputed filters. The
363 | lowpass shearlet is indexed by [0 0 0]. If
364 | no shearlet indexes are specified,
365 | SLgetShearlets2D returns a standard
366 | shearlet system based on the precomputed
367 | filters.
368 | Such a standard index set can also be
369 | obtained by calling SLgetShearletIdxs2D.
370 |
371 | Output:
372 |
373 | shearlets: A X x Y x N array of N 2D shearlets in the
374 | frequency domain where X and Y denote the
375 | size of each shearlet.
376 | RMS: A 1xN array containing the root mean
377 | squares (L2-norm divided by sqrt(X*Y)) of all
378 | shearlets stored in shearlets. These values
379 | can be used to normalize shearlet coefficients
380 | to make them comparable.
381 | dualFrameWeights: A X x Y matrix containing the absolute and
382 | squared sum over all shearlets stored in
383 | shearlets. These weights are needed to compute
384 | the dual frame during reconstruction.
385 |
386 | Description:
387 |
388 | The wedge and bandpass filters in preparedFilters are used to compute
389 | shearlets on different scales and of different shearings, as specified by
390 | the shearletIdxs array. Shearlets are computed in the frequency domain.
391 | To get the i-th shearlet in the time domain, use
392 |
393 | fftshift(ifft2(ifftshift(shearlets(:,:,i)))).
394 |
395 | Each Shearlet is centered at floor([X Y]/2) + 1.
396 |
397 | Example 1:
398 | Compute the lowpass shearlet:
399 |
400 | preparedFilters
401 | = SLprepareFilters2D(512,512,4,[1 1 2 2])
402 | lowpassShearlet
403 | = SLgetShearlets2D(preparedFilters,[0 0 0])
404 | lowpassShearletTimeDomain
405 | = fftshift(ifft2(ifftshift(lowpassShearlet)))
406 |
407 | Example 2:
408 | Compute a standard shearlet system of four scales:
409 |
410 | preparedFilters = SLprepareFilters2D(512,512,4)
411 | shearlets = SLgetShearlets2D(preparedFilters)
412 |
413 | Example 3:
414 | Compute a full shearlet system of four scales:
415 |
416 | preparedFilters = SLprepareFilters2D(512,512,4)
417 | shearlets = SLgetShearlets2D(preparedFilters,SLgetShearletIdxs2D(preparedFilters.shearLevels,1))
418 |
419 | See also: SLprepareFilters2D, SLgetShearletIdxs2D, SLsheardec2D, SLshearrec2D
420 | """
421 |
422 | if shearletIdxs is None:
423 | shearletIdxs = SLgetShearletIdxs2D(preparedFilters["shearLevels"])
424 | # useGPU = preparedFilters["useGPU"] - we don't support gpus right now
425 | rows = preparedFilters["size"][0]
426 | cols = preparedFilters["size"][1]
427 | nShearlets = shearletIdxs.shape[0]
428 | # allocate shearlets
429 | # ...skipping gpu part...
430 | shearlets = np.zeros((rows,cols,nShearlets), dtype=complex)
431 |
432 | # compute shearlets
433 | for j in range(nShearlets):
434 | cone = shearletIdxs[j,0]
435 | scale = shearletIdxs[j,1]
436 | shearing = shearletIdxs[j,2]
437 | if cone == 0:
438 | shearlets[:,:,j] = preparedFilters["cone1"]["lowpass"]
439 | elif cone == 1:
440 | #here, the fft of the digital shearlet filters described in
441 | #equation (23) on page 15 of "ShearLab 3D: Faithful Digital
442 | #Shearlet Transforms based on Compactly Supported Shearlets" is computed.
443 | #for more details on the construction of the wedge and bandpass
444 | #filters, please refer to SLgetWedgeBandpassAndLowpassFilters2D.
445 | #print(preparedFilters["cone1"]["wedge"][preparedFilters["shearLevels"][scale-1]])
446 | #print(preparedFilters["shearLevels"][scale-1])
447 | # letztes index checken! ggf. +1?
448 | shearlets[:,:,j] = preparedFilters["cone1"]["wedge"][preparedFilters["shearLevels"][scale-1]][:,:,-shearing+np.power(2,preparedFilters["shearLevels"][scale-1])]*np.conj(preparedFilters["cone1"]["bandpass"][:,:,scale-1])
449 | else:
450 | shearlets[:,:,j] = np.transpose(preparedFilters["cone2"]["wedge"][preparedFilters["shearLevels"][scale-1]][:,:,shearing+np.power(2,preparedFilters["shearLevels"][scale-1])]*np.conj(preparedFilters["cone2"]["bandpass"][:,:,scale-1]))
451 | # the matlab version only returns RMS and dualFrameWeights if the function
452 | # is called accordingly. we compute them always for the time being.
453 | RMS = np.linalg.norm(shearlets, axis=(0,1))/np.sqrt(rows*cols)
454 | dualFrameWeights = np.sum(np.power(np.abs(shearlets),2), axis=2)
455 |
456 | return shearlets, RMS, dualFrameWeights
457 |
458 |
def SLgetWedgeBandpassAndLowpassFilters2D(rows,cols,shearLevels,directionalFilter=None,scalingFilter=None,waveletFilter=None,scalingFilter2=None):
    """
    Computes the wedge, bandpass and lowpass filter for 2D shearlets. If no
    directional filter, scaling filter and wavelet filter are given, some
    standard filters are used.

    rows, cols and shearLevels are mandatory.

    Input:

        rows:              Height of the filters to be computed.
        cols:              Width of the filters to be computed.
        shearLevels:       1D integer array; shearLevels[j] is the level of
                           shearing used on scale j.
        directionalFilter: Optional 2D directional filter. Defaults to
                           modulate2(dfilters('dmaxflat4','d')/sqrt(2),'c').
        scalingFilter:     Optional 1D scaling (lowpass) filter. Defaults to
                           a fixed 9-tap quadrature mirror filter.
        waveletFilter:     Optional 1D wavelet (highpass) filter. Defaults to
                           MirrorFilt(scalingFilter).
        scalingFilter2:    Optional 1D scaling filter used for the
                           y-direction of the tensor product. Defaults to
                           scalingFilter.

    Output:

        wedge:    List indexed by shear level; wedge[l] is a complex
                  rows x cols x (2^(l+1)+1) array of directional wedge
                  filters in the frequency domain (entries for unused
                  levels stay None).
        bandpass: Complex rows x cols x nScales array holding one bandpass
                  filter per scale, in the frequency domain.
        lowpass:  Complex rows x cols lowpass filter in the frequency domain.
    """
    # fall back to the standard 9-tap quadrature mirror filter pair
    if scalingFilter is None:
        scalingFilter = np.array([0.0104933261758410, -0.0263483047033631,
                                  -0.0517766952966370, 0.276348304703363,
                                  0.582566738241592, 0.276348304703363,
                                  -0.0517766952966369, -0.0263483047033631,
                                  0.0104933261758408])
    if scalingFilter2 is None:
        scalingFilter2 = scalingFilter
    if waveletFilter is None:
        # quadrature mirror counterpart of the scaling filter
        waveletFilter = MirrorFilt(scalingFilter)
    if directionalFilter is None:
        # NOTE(review): h1 is unused here; this also assumes dfilters returns
        # something that supports elementwise division before unpacking --
        # confirm against the pyshearlab implementation of dfilters.
        h0,h1 = dfilters('dmaxflat4', 'd')/np.sqrt(2)
        directionalFilter = modulate2(h0, 'c')

    ###########################################################################
    # all page and equation numbers refer to "ShearLab 3D: Faithful Digital #
    # Shearlet Transforms based on Compactly Supported Shearlets" #
    ###########################################################################

    # initialize variables

    # get number of scales
    NScales = shearLevels.size

    # allocate bandpass and wedge filters
    bandpass = np.zeros((rows,cols, NScales), dtype=complex) #these filters partition the frequency plane into different scales
    wedge = [None] * ( max(shearLevels) + 1 ) # these filters partition the frequency plane into different directions

    #normalize filters (L1-normalize the directional filter)
    directionalFilter = directionalFilter/sum(sum(np.absolute(directionalFilter)))

    ## compute 1D high and lowpass filters at different scales:
    #
    # filterHigh{NScales} = g_1 and filterHigh{1} = g_J (compare page 11)
    filterHigh = [None] * NScales
    # we have filterLow{NScales} = h_1 and filterLow{1} = h_J (compare page 11)
    filterLow = [None] * NScales
    #typically, we have filterLow2{max(shearLevels)+1} = filterLow{NScales},
    # i.e. filterLow2{NScales} = h_1 (compare page 11)
    filterLow2 = [None] * (max(shearLevels) + 1)

    ## initialize wavelet highpass and lowpass filters:
    #
    # this filter is typically chosen to form a quadrature mirror filter pair
    # with scalingFilter and corresponds to g_1 on page 11
    filterHigh[-1] = waveletFilter
    filterLow[-1] = scalingFilter # this filter corresponds to h_1 on page 11
    # this filter is typically chosen to be equal to scalingFilter and provides
    # the y-direction for the tensor product constructing the 2D wavelet filter
    # w_j on page 14
    filterLow2[-1] = scalingFilter2

    # compute wavelet high- and lowpass filters associated with a 1D Digital
    # wavelet transform on Nscales scales, e.g., we compute h_1 to h_J and
    # g_1 to g_J (compare page 11) with J = nScales. coarser scales are
    # obtained by convolving the finest filter with the dyadically upsampled
    # previous one (the "algorithme a trous").
    for j in range(len(filterHigh)-2,-1,-1):
        filterLow[j] = np.convolve(filterLow[-1], SLupsample(filterLow[j+1],2,1))
        filterHigh[j] = np.convolve(filterLow[-1], SLupsample(filterHigh[j+1],2,1))
    for j in range(len(filterLow2)-2,-1,-1):
        filterLow2[j] = np.convolve(filterLow2[-1], SLupsample(filterLow2[j+1],2,1))
    # construct bandpass filters for scales 1 to nScales by taking the
    # centered 2D FFT of the zero-padded 1D highpass filters
    for j in range(len(filterHigh)):
        bandpass[:,:,j] = fftlib.fftshift(fftlib.fft2(fftlib.ifftshift(SLpadArray(filterHigh[j], np.array([rows, cols])))))

    ## construct wedge filters for achieving directional selectivity.
    # as the entries in the shearLevels array describe the number of differently
    # sheared atoms on a certain scale, a different set of wedge
    # filters has to be constructed for each value in shearLevels.
    # reshape to (1, len) so the filter acts as a row vector in 2D convolutions
    filterLow2[-1].shape = (1, len(filterLow2[-1]))
    for shearLevel in np.unique(shearLevels):
        # preallocate a total of floor(2^(shearLevel+1)+1) wedge filters, where
        # floor(2^(shearLevel+1)+1) is the number of different directions of
        # shearlet atoms associated with the horizontal (resp. vertical)
        # frequency cones.
        #
        # plus one for one unsheared shearlet
        wedge[shearLevel] = np.zeros((rows, cols, int(np.floor(np.power(2,shearLevel+1)+1))), dtype=complex)

        # upsample directional filter in y-direction. by upsampling the directional
        # filter in the time domain, we construct repeating wedges in the
        # frequency domain ( compare abs(fftshift(fft2(ifftshift(directionalFilterUpsampled)))) and
        # abs(fftshift(fft2(ifftshift(directionalFilter)))) ).

        directionalFilterUpsampled = SLupsample(directionalFilter, 1, np.power(2,shearLevel+1)-1)

        # remove high frequencies along the y-direction in the frequency domain.
        # by convolving the upsampled directional filter with a lowpass filter in y-direction, we remove all
        # but the central wedge in the frequency domain.
        #
        # convert filterLow2 into a pseudo 2D array of size (1, len) to use
        # scipy.signal.convolve2d accordingly.
        filterLow2[-1-shearLevel].shape = (1, len(filterLow2[-1-shearLevel]))

        wedgeHelp = scipy.signal.convolve2d(directionalFilterUpsampled,np.transpose(filterLow2[len(filterLow2)-shearLevel-1]));
        wedgeHelp = SLpadArray(wedgeHelp,np.array([rows,cols]));
        # please note that wedgeHelp now corresponds to
        # conv(p_j,h_(J-j*alpha_j/2)') in the language of the paper. to see
        # this, consider the definition of p_j on page 14, the definition of w_j
        # on the same page and the definition of the digital shearlet filter on
        # page 15. furthermore, the g_j part of the 2D wavelet filter w_j is
        # invariant to shearings, hence it suffices to apply the digital shear
        # operator to wedgeHelp.

        ## application of the digital shear operator (compare equation (22))
        # upsample wedge filter in x-direction. this operation corresponds to
        # the upsampling in equation (21) on page 15.
        wedgeUpsampled = SLupsample(wedgeHelp,2,np.power(2,shearLevel)-1);

        #convolve wedge filter with lowpass filter, again following equation
        # (21) on page 14. the convolution is realized as a pointwise product
        # of the centered FFTs; it is skipped for shearLevel 0, where the
        # upsampling above is a no-op.
        lowpassHelp = SLpadArray(filterLow2[len(filterLow2)-max(shearLevel-1,0)-1], np.asarray(wedgeUpsampled.shape))
        if shearLevel >= 1:
            wedgeUpsampled = fftlib.fftshift(fftlib.ifft2(fftlib.ifftshift(fftlib.fftshift(fftlib.fft2(fftlib.ifftshift(lowpassHelp))) * fftlib.fftshift(fftlib.fft2(fftlib.ifftshift(wedgeUpsampled))))))
        lowpassHelpFlip = np.fliplr(lowpassHelp)
        # traverse all directions of the upper part of the left horizontal
        # frequency cone
        for k in range(-np.power(2, shearLevel), np.power(2, shearLevel)+1):
            # resample wedgeUpsampled as given in equation (22) on page 15.
            wedgeUpsampledSheared = SLdshear(wedgeUpsampled,k,2)
            # convolve again with flipped lowpass filter, as required by
            # equation (22) on page 15
            if shearLevel >= 1:
                wedgeUpsampledSheared = fftlib.fftshift(fftlib.ifft2(fftlib.ifftshift(fftlib.fftshift(fftlib.fft2(fftlib.ifftshift(lowpassHelpFlip))) * fftlib.fftshift(fftlib.fft2(fftlib.ifftshift(wedgeUpsampledSheared))))))
            # obtain downsampled and renormalized and sheared wedge filter
            # in the frequency domain, according to equation (22), page 15.
            wedge[shearLevel][:,:,int(np.fix(np.power(2,shearLevel))-k)] = fftlib.fftshift(fftlib.fft2(fftlib.ifftshift(np.power(2,shearLevel)*wedgeUpsampledSheared[:,0:np.power(2,shearLevel)*cols-1:np.power(2,shearLevel)])))
    # compute low pass filter of shearlet system as the centered 2D FFT of
    # the zero-padded outer product of the coarsest 1D lowpass with itself
    lowpass = fftlib.fftshift(fftlib.fft2(fftlib.ifftshift(SLpadArray(np.outer(filterLow[0],filterLow[0]), np.array([rows, cols])))))
    return wedge, bandpass, lowpass
598 |
599 |
def SLnormalizeCoefficients2D(coeffs, shearletSystem):
    """
    Normalizes the shearlet coefficients coeffs for a given set of 2D
    shearlet coefficients and a given shearlet system shearletSystem
    by dividing each band by the RMS of the corresponding shearlet.

    Parameters
    ----------
    coeffs : X x Y x N array of shearlet coefficients (may be complex).
    shearletSystem : dict providing "nShearlets" (int N) and "RMS"
        (length-N array of per-shearlet root mean squares).

    Returns
    -------
    coeffsNormalized : array of the same shape and dtype as coeffs with
        each band i divided by shearletSystem["RMS"][i].
    """
    # preserve the input dtype: shearlet coefficients are typically complex,
    # and a plain np.zeros (float64) buffer would silently discard the
    # imaginary part on assignment.
    coeffsNormalized = np.zeros(coeffs.shape, dtype=coeffs.dtype)

    for i in range(shearletSystem["nShearlets"]):
        coeffsNormalized[:, :, i] = coeffs[:, :, i] / shearletSystem["RMS"][i]
    return coeffsNormalized
611 |
612 |
613 |
614 |
def SLpadArray(array, newSize):
    """
    Implements the zero-padding of an array as performed by the Matlab
    (ShearLab) variant: array is embedded, roughly centered, in an all-zero
    array of size newSize.

    Parameters
    ----------
    array : 1D or 2D ndarray to embed.
    newSize : scalar target length (1D result) or length-2 integer array
        [rows, cols] (2D result). Must be >= the input size in every
        dimension.

    Returns
    -------
    paddedArray : zero array of size newSize containing array, placed
        following Matlab's even/odd centering convention (even-sized inputs
        are shifted by one relative to the symmetric center).
    """
    if np.isscalar(newSize):
        currSize = array.size
        paddedArray = np.zeros(newSize)
        sizeDiff = newSize - currSize
        idxModifier = 0
        if sizeDiff < 0:
            sys.exit("Error: newSize is smaller than actual array size.")
        if sizeDiff == 0:
            print("Warning: newSize is equal to padding size.")
        if sizeDiff % 2 == 0:
            padSizes = sizeDiff//2
        else:
            padSizes = int(np.ceil(sizeDiff/2))
        if currSize % 2 == 0:
            # even-length input: shift one to the left (Matlab index 1...k+1)
            idxModifier = 1
        else:
            # odd-length input: symmetric placement (index 0...k)
            idxModifier = 0
        # (a leftover debug print of padSizes was removed here)
        paddedArray[padSizes-idxModifier:padSizes+currSize-idxModifier] = array

    else:
        padSizes = np.zeros(newSize.size)
        paddedArray = np.zeros((newSize[0], newSize[1]))
        idxModifier = np.array([0, 0])
        currSize = np.asarray(array.shape)
        if array.ndim == 1:
            # treat a 1D input as a single row of length len(array)
            currSize = np.array([len(array), 0])
        for k in range(newSize.size):
            sizeDiff = newSize[k] - currSize[k]
            if sizeDiff < 0:
                sys.exit("Error: newSize is smaller than actual array size in dimension " + str(k) + ".")
            if sizeDiff == 0:
                print("Warning: newSize is equal to padding size in dimension " + str(k) + ".")
            if sizeDiff % 2 == 0:
                padSizes[k] = sizeDiff//2
            else:
                padSizes[k] = np.ceil(sizeDiff/2)
            if currSize[k] % 2 == 0:
                # even extent: shift by one (Matlab index 1...k+1)
                idxModifier[k] = 1
            else:
                # odd extent: symmetric placement (index 0...k)
                idxModifier[k] = 0
        padSizes = padSizes.astype(int)

        # if array is 1D but paddedArray is 2D we simply put the array (as a
        # row array in the middle of the new empty array). this seems to be
        # the behavior of the ShearLab routine from matlab.
        # NOTE(review): the row index padSizes[1] (a column-derived offset)
        # mirrors the original pyShearLab code -- verify against ShearLab's
        # Matlab behavior before changing.
        if array.ndim == 1:
            paddedArray[padSizes[1], padSizes[0]:padSizes[0]+currSize[0]+idxModifier[0]] = array
        else:
            # note the asymmetric use of idxModifier (minus for rows, plus
            # for columns) -- faithful to the original port
            paddedArray[padSizes[0]-idxModifier[0]:padSizes[0]+currSize[0]-idxModifier[0],
                        padSizes[1]+idxModifier[1]:padSizes[1]+currSize[1]+idxModifier[1]] = array
    return paddedArray
677 |
678 |
def SLprepareFilters2D(rows, cols, nScales, shearLevels=None,
           directionalFilter=None, scalingFilter=None, waveletFilter=None,
           scalingFilter2=None):
    """
    Precompute the wedge, bandpass and lowpass filters needed to build 2D
    shearlet systems for data of size rows x cols.

    Input:

        rows:              Number of rows.
        cols:              Number of columns.
        nScales:           Number of scales of the desired shearlet system
                           (>= 1).
        shearLevels:       1 x nScales array giving the shearing level per
                           scale (each entry >= 0); a level K means the
                           generating shearlet is sheared 2^K times in each
                           direction for each cone. Defaults to
                           ceil((1..nScales)/2).
        directionalFilter: 2D directional filter forming the directional
                           'component' of the shearlets. Defaults to
                           modulate2(dfilters('dmaxflat4','d'),'c'). For
                           small inputs or very large systems consider
                           modulate2(dfilters('cd','d'),'c') instead.
        scalingFilter:     1D quadrature mirror (scaling) filter defining the
                           wavelet 'component'; defaults to a fixed 9-tap
                           filter. Other QMF filters can be generated with
                           MakeONFilter.
        waveletFilter:     1D wavelet filter; defaults to
                           MirrorFilt(scalingFilter).
        scalingFilter2:    Second 1D scaling filter; defaults to
                           scalingFilter.

    Output:

        filters: Dict with keys "size", "shearLevels", "cone1" and "cone2",
                 where each cone holds the wedge, bandpass and lowpass
                 filters needed to compute 2D shearlets.

    Example:

        preparedFilters = SLprepareFilters2D(512, 512, 4)
        shearlets = SLgetShearlets2D(preparedFilters)

    See also: SLgetShearletIdxs2D, SLgetShearlets2D, dfilters, MakeONFilter
    """
    # fill in default arguments where the caller left them unspecified
    if shearLevels is None:
        shearLevels = np.ceil(np.arange(1, nScales + 1) / 2).astype(int)
    if scalingFilter is None:
        scalingFilter = np.array([0.0104933261758410, -0.0263483047033631,
            -0.0517766952966370, 0.276348304703363, 0.582566738241592,
            0.276348304703363, -0.0517766952966369, -0.0263483047033631,
            0.0104933261758408])
    if scalingFilter2 is None:
        scalingFilter2 = scalingFilter
    if waveletFilter is None:
        waveletFilter = MirrorFilt(scalingFilter)
    if directionalFilter is None:
        h0, _ = dfilters('dmaxflat4', 'd')/np.sqrt(2)
        directionalFilter = modulate2(h0, 'c')

    # let the sanity checker adjust the filters to the requested sizes
    (directionalFilter, scalingFilter,
     waveletFilter, scalingFilter2) = SLcheckFilterSizes(
        rows, cols, shearLevels, directionalFilter,
        scalingFilter, waveletFilter, scalingFilter2)

    def _cone(nRows, nCols):
        # build wedge/bandpass/lowpass filters for one frequency cone
        wedge, bandpass, lowpass = SLgetWedgeBandpassAndLowpassFilters2D(
            nRows, nCols, shearLevels, directionalFilter,
            scalingFilter, waveletFilter, scalingFilter2)
        wedge[0] = 0  # for matlab compatibilty (saving filters as .mat files)
        return {"wedge": wedge, "bandpass": bandpass, "lowpass": lowpass}

    filters = {"size": np.array([rows, cols]), "shearLevels": shearLevels}
    filters["cone1"] = _cone(rows, cols)
    # square inputs share one filter set across both cones; otherwise the
    # second cone uses transposed dimensions
    filters["cone2"] = filters["cone1"] if rows == cols else _cone(cols, rows)
    return filters
793 | #
794 | ##############################################################################
795 |
796 |
797 | ##############################################################################
798 | #
def SLupsample(array, dims, nZeros):
    """
    Performs an upsampling by a number of nZeros along the dimension(s) dims
    for a given array: the original samples are placed nZeros+1 apart and
    all other entries are zero.

    Note that this version behaves like the Matlab version, this means we
    have dims = 1 (rows) or dims = 2 (columns) instead of dims = 0 and
    dims = 1. For 1D input, dims is ignored.

    Parameters
    ----------
    array : 1D or 2D ndarray.
    dims : 1 or 2, the (Matlab-style) axis to upsample along (2D only).
    nZeros : number of zeros inserted between neighboring samples.

    Returns
    -------
    arrayUpsampled : upsampled array of extent (n-1)*(nZeros+1)+1 along the
        chosen dimension.
    """
    if array.ndim == 1:
        # fix: the previous np.insert-based version always inserted exactly
        # one zero, silently ignoring nZeros (all in-file callers pass
        # nZeros=1, so this generalization is backward-compatible).
        sz = len(array)
        arrayUpsampled = np.zeros((sz - 1) * (nZeros + 1) + 1)
        arrayUpsampled[::nZeros + 1] = array
    else:
        sz = np.asarray(array.shape)
        # behaves like in matlab: dims == 1 and dims == 2 instead of 0 and 1.
        if dims == 0:
            sys.exit("SLupsample behaves like in Matlab, so chose dims = 1 or dims = 2.")
        if dims == 1:
            arrayUpsampled = np.zeros(((sz[0]-1)*(nZeros+1)+1, sz[1]))
            # strided assignment replaces the per-row Python loop
            arrayUpsampled[::nZeros + 1, :] = array
        if dims == 2:
            arrayUpsampled = np.zeros((sz[0], ((sz[1]-1)*(nZeros+1)+1)))
            arrayUpsampled[:, ::nZeros + 1] = array
    return arrayUpsampled
825 |
826 | #
827 | ##############################################################################
828 |
829 |
830 | ##############################################################################
831 | #
832 |
def down2D(t, d, dims=(-2, -1)):
    """
    Subsample a tensor by keeping every d-th row and column.

    Input:
        t: tensor to subsample
        d: decimation factor
        dims: (x, y) which gives the height and width dimension indices
              (s.t. t.shape[x] == H and t.shape[y] == W)
    Output:
        subsampled tensor
    """
    H, W = t.shape[dims[0]], t.shape[dims[1]]
    # warn on both spatial axes (the previous version only checked H)
    if H % d != 0:
        print(f'Warning: dimension {H} is not divisible by {d}')
    if W % d != 0:
        print(f'Warning: dimension {W} is not divisible by {d}')

    ndims = len(t.shape)
    idxs = [slice(None)] * ndims
    idxs[dims[0]] = slice(0, H, d)
    idxs[dims[1]] = slice(0, W, d)

    # index with a single tuple: indexing with a Python *list* of slices is
    # deprecated/ambiguous in NumPy and PyTorch.
    return t[tuple(idxs)]
854 |
855 |
def up2D(t, d, dims=(-2, -1)):
    """
    2D zero-upsampling of a tensor.

    Input
    -----
    t : tensor to zero-upsample
    d : upsample factor
    dims : (x,y) which gives the height and width dimension indices (s.t. t.shape[x] == H and t.shape[y] == W )

    Output
    -----
    tensor of extent d*H x d*W along dims, with the entries of t on the
    d-strided grid and zeros elsewhere. (The d**2 energy-compensation
    scaling is applied by the caller, see upAndMergeBands -- the previous
    docstring wrongly claimed it happened here.)

    Warning
    -----
    We assume there is no extra padding needed to get to the desired upscaled resolution

    """
    H, W = t.shape[dims[0]], t.shape[dims[1]]
    uH, uW = d * H, d * W
    ndims = len(t.shape)
    newshape = list(t.shape)
    newshape[dims[0]] = uH
    newshape[dims[1]] = uW

    idxs = [slice(None)] * ndims
    idxs[dims[0]] = slice(0, uH, d)
    idxs[dims[1]] = slice(0, uW, d)

    r = torch.zeros(newshape, device=t.device)
    # fix: assign through one tuple index. The previous chained form
    # r[rowIdxs][colIdxs] = t indexed with Python lists of slices, which is
    # deprecated/ambiguous and can silently write into a temporary copy
    # under advanced-indexing semantics.
    r[tuple(idxs)] = t

    return r
893 |
894 |
def subsampleBands(t, idxs, decimFactors):
    """
    Group the shearlet bands by decimation factor and downsample each group.

    Input
    -----
    t : tensor [N, C, H, W, M] of M shearlet bands
    idxs : an array of size M where idxs[m] = s indicates to which scale s band m belongs
    decimFactors : reverse(decimFactors)[i] gives the decimation factor required for scale i

    Output
    -----
    dictionary R where R[d] gives the tensor [N, C, Hd, Wd, Md] of bands that are decimated by factor d
    """
    bandsPerFactor = getIdxsPerDecim(idxs, decimFactors)
    # select the bands belonging to each factor and decimate them spatially
    return {
        d: down2D(t[..., bandsPerFactor[d]], d, dims=(-3, -2))
        for d in decimFactors
    }
914 |
def upAndMergeBands(bandsDict, idxs, decimFactors):
    """
    Upsample each group of decimated bands back to full resolution and merge
    them into a single stack of shearlet bands.

    Input
    -----
    bandsDict : dictionary of bands where bandsDict[d] gives the tensor [N, C, Hd, Wd, Md] of the Md bands decimated by factor d
    idxs : an array of size M where idxs[m] = s indicates to which scale s band m belongs
    decimFactors : reverse(decimFactors)[i] gives the decimation factor required for scale i

    Output
    -----
    NxCxHxWxM tensor of shearlet bands where M = sum(Md for d)
    """
    idxsPerDecim = getIdxsPerDecim(idxs, decimFactors)

    # first pass: upsample every group and accumulate the total band count
    uppedDict = {}
    M = 0
    for factor, group in bandsDict.items():
        upped = up2D(group, factor, dims=(-3, -2))
        uppedDict[factor] = upped
        N, C, H, W, m = upped.shape
        M += m

    result = torch.zeros((N, C, H, W, M), device=upped.device)

    # second pass: scatter each group into its band slots, scaling by
    # factor**2 (once per spatial dimension) to make up for the energy
    # "lost" during zero-upsampling
    for factor, upped in uppedDict.items():
        result[..., idxsPerDecim[factor]] = upped * (factor ** 2)

    return result
945 |
946 |
947 |
948 |
def getIdxsPerDecim(idxs, decimFactors):
    """
    Group band indices by the decimation factor their scale requires.

    Input
    -----
    idxs : an array of size M where idxs[m] = s indicates to which scale s band m belongs
    decimFactors : reverse(decimFactors)[i] gives the decimation factor required for scale i

    Output
    -----
    A dict R where R[d] gives the tuple of band indices which require a decimation factor d
    """
    nScales = len(decimFactors)  # number of scales, including lowpass scale 0
    flipped = np.flip(decimFactors)

    result = {}
    for factor in flipped:
        # each distinct decimation factor is handled only once
        if factor in result:
            continue
        matchingScales = [s for s in range(nScales) if flipped[s] == factor]
        # scale-major order: all bands of the first matching scale, then the
        # next, preserving the original traversal order
        result[factor] = tuple(
            band
            for s in matchingScales
            for band in range(len(idxs))
            if idxs[band] == s
        )
    return result
--------------------------------------------------------------------------------