Skip to content

Commit 633e3c7

Browse files
committed
Update docs
1 parent a2c0566 commit 633e3c7

File tree

2 files changed

+24
-45
lines changed

2 files changed

+24
-45
lines changed

src/ptwt/packets.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -83,14 +83,14 @@ def __init__(
8383
8484
Args:
8585
data (torch.Tensor, optional): The input time series to transform.
86-
By default the last axis is transformed.
86+
By default, the last axis is transformed.
8787
Use the `axis` argument to choose another dimension.
8888
If None, the object is initialized without performing a decomposition.
8989
wavelet (Wavelet or str): A pywt wavelet compatible object or
9090
the name of a pywt wavelet.
9191
Refer to the output from ``pywt.wavelist(kind='discrete')``
9292
for possible choices.
93-
mode: The desired mode to handle signal boundaries. Select either the
93+
mode: The desired mode to handle signal boundaries. Select either
9494
the sparse-matrix backend (``boundary``) or a padding mode.
9595
See :data:`ptwt.constants.ExtendedBoundaryMode`.
9696
Defaults to ``reflect``.
@@ -112,6 +112,7 @@ def __init__(
112112
is not supported.
113113
114114
Example:
115+
115116
>>> import torch, pywt, ptwt
116117
>>> import numpy as np
117118
>>> import scipy.signal

src/ptwt/wavelets_learnable.py

Lines changed: 21 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -12,9 +12,8 @@
1212
class WaveletFilter(ABC):
1313
"""Interface for learnable wavelets.
1414
15-
Each wavelet has a filter bank loss function
16-
and comes with functionality that tests the perfect
17-
reconstruction and anti-aliasing conditions.
15+
Each wavelet has a filter bank loss function and comes with functionality that tests
16+
the perfect reconstruction and antialiasing conditions.
1817
"""
1918

2019
@property
@@ -44,13 +43,12 @@ def pf_alias_cancellation_loss(
4443
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
4544
"""Return the product filter-alias cancellation loss.
4645
47-
See: Strang+Nguyen 105: $$F_0(z) = H_1(-z); F_1(z) = -H_0(-z)$$
48-
Alternating sign convention from 0 to N see Strang overview
49-
on the back of the cover.
46+
See: Strang+Nguyen 105: $$F_0(z) = H_1(-z); F_1(z) = -H_0(-z)$$ Alternating sign
47+
convention from 0 to N; see Strang's overview on the back of the cover.
5048
5149
Returns:
52-
The numerical value of the alias cancellation loss,
53-
as well as both loss components for analysis.
50+
The numerical value of the alias cancellation loss, as well as both loss
51+
components for analysis.
5452
"""
5553
dec_lo, dec_hi, rec_lo, rec_hi = self.filter_bank
5654
m1 = torch.tensor([-1], device=dec_lo.device, dtype=dec_lo.dtype)
@@ -78,13 +76,12 @@ def alias_cancellation_loss(
7876
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
7977
"""Return the alias cancellation loss.
8078
81-
Implementation of the ac-loss as described
82-
on page 104 of Strang+Nguyen.
79+
Implementation of the ac-loss as described on page 104 of Strang+Nguyen.
8380
$$F_0(z)H_0(-z) + F_1(z)H_1(-z) = 0$$
8481
8582
Returns:
86-
The numerical value of the alias cancellation loss,
87-
as well as both loss components for analysis.
83+
The numerical value of the alias cancellation loss, as well as both loss
84+
components for analysis.
8885
"""
8986
dec_lo, dec_hi, rec_lo, rec_hi = self.filter_bank
9087
m1 = torch.tensor([-1], device=dec_lo.device, dtype=dec_lo.dtype)
@@ -120,8 +117,8 @@ def perfect_reconstruction_loss(
120117
"""Return the perfect reconstruction loss.
121118
122119
Returns:
123-
The numerical value of the alias cancellation loss,
124-
as well as both intermediate values for analysis.
120+
The numerical value of the alias cancellation loss, as well as both
121+
intermediate values for analysis.
125122
"""
126123
# Strang 107: Assuming alias cancellation holds:
127124
# P(z) = F(z)H(z)
@@ -174,14 +171,14 @@ def __init__(
174171
dec_hi: torch.Tensor,
175172
rec_lo: torch.Tensor,
176173
rec_hi: torch.Tensor,
177-
):
174+
) -> None:
178175
"""Create a Product filter object.
179176
180177
Args:
181-
dec_lo (torch.Tensor): Low pass analysis filter.
182-
dec_hi (torch.Tensor): High pass analysis filter.
183-
rec_lo (torch.Tensor): Low pass synthesis filter.
184-
rec_hi (torch.Tensor): High pass synthesis filter.
178+
dec_lo : Low pass analysis filter.
179+
dec_hi : High pass analysis filter.
180+
rec_lo : Low pass synthesis filter.
181+
rec_hi : High pass synthesis filter.
185182
"""
186183
super().__init__()
187184
self.dec_lo = torch.nn.Parameter(dec_lo)
@@ -223,29 +220,11 @@ def wavelet_loss(self) -> torch.Tensor:
223220
class SoftOrthogonalWavelet(ProductFilter, torch.nn.Module):
224221
"""Orthogonal wavelets with a soft orthogonality constraint."""
225222

226-
def __init__(
227-
self,
228-
dec_lo: torch.Tensor,
229-
dec_hi: torch.Tensor,
230-
rec_lo: torch.Tensor,
231-
rec_hi: torch.Tensor,
232-
):
233-
"""Create a SoftOrthogonalWavelet object.
234-
235-
Args:
236-
dec_lo (torch.Tensor): Low pass analysis filter.
237-
dec_hi (torch.Tensor): High pass analysis filter.
238-
rec_lo (torch.Tensor): Low pass synthesis filter.
239-
rec_hi (torch.Tensor): High pass synthesis filter.
240-
"""
241-
super().__init__(dec_lo, dec_hi, rec_lo, rec_hi)
242-
243223
def rec_lo_orthogonality_loss(self) -> torch.Tensor:
244224
"""Return a Strang inspired soft orthogonality loss.
245225
246-
See Strang p. 148/149 or Harbo p. 80.
247-
Since L is a convolution matrix, LL^T can be evaluated
248-
trough convolution.
226+
See Strang p. 148/149 or Harbo p. 80. Since L is a convolution matrix, LL^T can
227+
be evaluated through convolution.
249228
250229
Returns:
251230
A tensor with the orthogonality constraint value.
@@ -276,10 +255,9 @@ def rec_lo_orthogonality_loss(self) -> torch.Tensor:
276255
def filt_bank_orthogonality_loss(self) -> torch.Tensor:
277256
"""Return a Jensen+Harbo inspired soft orthogonality loss.
278257
279-
On Page 79 of the Book Ripples in Mathematics
280-
by Jensen la Cour-Harbo, the constraint
281-
g0[k] = h0[-k] and g1[k] = h1[-k] for orthogonal filters
282-
is presented. A measurement is implemented below.
258+
On page 79 of the book Ripples in Mathematics by Jensen and la Cour-Harbo, the
259+
constraint g0[k] = h0[-k] and g1[k] = h1[-k] for orthogonal filters is
260+
presented. A measurement is implemented below.
283261
284262
Returns:
285263
A tensor with the orthogonality constraint value.

0 commit comments

Comments
 (0)