Commit 02648ec

Jorgedavyd committed Jun 11, 2024
2 parents 8d14cf1 + 431d67a
Showing 46 changed files with 454 additions and 172 deletions.
Binary file added lightorch/__pycache__/__init__.cpython-310.pyc
Binary file added lightorch/__pycache__/_version.cpython-310.pyc
Binary file added lightorch/__pycache__/hparams.cpython-310.pyc
1 change: 1 addition & 0 deletions lightorch/antique.py

@@ -10,6 +10,7 @@
 # REASON: Optimization performance against other libraries (lightning)
 # requires tensorboard installed
 
+
 def create_config(name_run: str):
     os.makedirs(f"./{name_run}/models", exist_ok=True)
     return {
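For context, create_config sets up a run directory before returning a config dict. A minimal usage sketch; the dict's contents are truncated in this hunk, so the key below is hypothetical:

    import os

    def create_config(name_run: str):
        # Ensure ./<name_run>/models exists for saving checkpoints
        os.makedirs(f"./{name_run}/models", exist_ok=True)
        return {
            "name_run": name_run,  # hypothetical key; the real dict is cut off above
        }

    config = create_config("baseline")  # creates ./baseline/models if missing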
16 changes: 9 additions & 7 deletions lightorch/hparams.py

@@ -43,20 +43,22 @@ def objective(trial: optuna.trial.Trial):
         trainer.fit(model, datamodule=dataset)
 
         if isinstance(valid_metrics, str):
-            if valid_metrics == 'hp_metric':
+            if valid_metrics == "hp_metric":
                 return trainer.callback_metrics[valid_metrics].item()
-            return trainer.callback_metrics[f'Training/{valid_metrics}'].item()
+            return trainer.callback_metrics[f"Training/{valid_metrics}"].item()
 
         else:
             out = []
             for valid_metric in valid_metrics:
-                if valid_metric == 'hp_metric':
+                if valid_metric == "hp_metric":
                     out.append(trainer.callback_metrics[valid_metric].item())
                 else:
-                    out.append(trainer.callback_metrics[f'Training/{valid_metric}'].item())
+                    out.append(
+                        trainer.callback_metrics[f"Training/{valid_metric}"].item()
+                    )
 
             return out
 
     if "precision" in kwargs:
         torch.set_float32_matmul_precision(precision)
     else:
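In effect, objective resolves each requested metric against Lightning's trainer.callback_metrics: "hp_metric" is looked up as-is, and every other name under the "Training/" prefix. A standalone sketch of that lookup rule, with made-up metric values:

    from torch import tensor

    callback_metrics = {"hp_metric": tensor(0.73), "Training/loss": tensor(0.41)}

    def resolve(name: str) -> float:
        # hp_metric is logged at the top level; everything else under Training/
        key = name if name == "hp_metric" else f"Training/{name}"
        return callback_metrics[key].item()

    print(resolve("hp_metric"))  # ~0.73
    print(resolve("loss"))       # ~0.41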
Binary file added lightorch/nn/__pycache__/__init__.cpython-310.pyc
Binary file added lightorch/nn/__pycache__/complex.cpython-310.pyc
Binary file added lightorch/nn/__pycache__/dnn.cpython-310.pyc
Binary file added lightorch/nn/__pycache__/fourier.cpython-310.pyc
Binary file added lightorch/nn/__pycache__/normalization.cpython-310.pyc
Binary file added lightorch/nn/__pycache__/partial.cpython-310.pyc
Binary file added lightorch/nn/__pycache__/utils.cpython-310.pyc
4 changes: 3 additions & 1 deletion lightorch/nn/criterions.py

@@ -26,7 +26,9 @@ def __init__(
 
 class Loss(LighTorchLoss):
     def __init__(self, *loss) -> None:
-        assert (len(set(map(type, loss))) == len(loss)), 'Not valid input classes, each should be different.'
+        assert len(set(map(type, loss))) == len(
+            loss
+        ), "Not valid input classes, each should be different."
         super().__init__(
             list(set([*chain.from_iterable([i.labels for i in loss])])),
             _merge_dicts([i.factors for i in loss]),
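The assertion requires every component loss passed to Loss to be of a distinct class, since set(map(type, loss)) collapses duplicates. A self-contained sketch of the check; the two stand-in classes are hypothetical:

    class MSETerm: ...
    class PerceptualTerm: ...

    distinct = (MSETerm(), PerceptualTerm())
    assert len(set(map(type, distinct))) == len(distinct)  # passes

    duplicated = (MSETerm(), MSETerm())
    # set(map(type, duplicated)) has one element, so this would raise AssertionError:
    # assert len(set(map(type, duplicated))) == len(duplicated)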
2 changes: 1 addition & 1 deletion lightorch/nn/dnn.py

@@ -43,4 +43,4 @@ def forward(self, input: Tensor) -> Tensor:
         return self.dnn(input)
 
 
-__all__ = ["DeepNeuralNetwork"]
\ No newline at end of file
+__all__ = ["DeepNeuralNetwork"]
32 changes: 16 additions & 16 deletions lightorch/nn/fourier.py

@@ -15,8 +15,8 @@ def __init__(
         n: int,
         in_channels: int,
         out_channels: int,
-        kernel_size: Union[Tuple[int],int],
-        padding: Union[Tuple[int],int] = None,
+        kernel_size: Union[Tuple[int], int],
+        padding: Union[Tuple[int], int] = None,
         bias: bool = True,
         eps: float = 1e-5,
         pre_fft: bool = True,
@@ -66,7 +66,7 @@ def __init__(
 
         self._init_parameters()
 
-    def get_padding(self, padding: Union[Tuple[int],int]) -> Sequence[int]:
+    def get_padding(self, padding: Union[Tuple[int], int]) -> Sequence[int]:
         if isinstance(padding, tuple):
             assert (
                 len(padding) == self.n
@@ -96,7 +96,7 @@ def __init__(
         n: int,
         in_channels: int,
         out_channels: int,
-        kernel_size: Union[Tuple[int],int],
+        kernel_size: Union[Tuple[int], int],
         padding: Tuple[int],
         bias: bool = True,
         eps: float = 0.00001,
@@ -125,8 +125,8 @@ def __init__(
         self,
         in_channels: int,
         out_channels: int,
-        kernel_size: Union[Tuple[int],int],
-        padding: Union[Tuple[int],int] = None,
+        kernel_size: Union[Tuple[int], int],
+        padding: Union[Tuple[int], int] = None,
         bias: bool = True,
         eps: float = 0.00001,
         pre_fft: bool = True,
@@ -170,8 +170,8 @@ def __init__(
         self,
         in_channels: int,
         out_channels: int,
-        kernel_size: Union[Tuple[int],int],
-        padding: Union[Tuple[int],int] = None,
+        kernel_size: Union[Tuple[int], int],
+        padding: Union[Tuple[int], int] = None,
         bias: bool = True,
         eps: float = 0.00001,
         pre_fft: bool = True,
@@ -216,8 +216,8 @@ def __init__(
         self,
         in_channels: int,
         out_channels: int,
-        kernel_size: Union[Tuple[int],int],
-        padding: Union[Tuple[int],int] = None,
+        kernel_size: Union[Tuple[int], int],
+        padding: Union[Tuple[int], int] = None,
         bias: bool = True,
         eps: float = 0.00001,
         pre_fft: bool = True,
@@ -263,8 +263,8 @@ def __init__(
         self,
         in_channels: int,
         out_channels: int,
-        kernel_size: Union[Tuple[int],int],
-        padding: Union[Tuple[int],int] = None,
+        kernel_size: Union[Tuple[int], int],
+        padding: Union[Tuple[int], int] = None,
         bias: bool = True,
         eps: float = 0.00001,
         pre_fft: bool = True,
@@ -308,8 +308,8 @@ def __init__(
         self,
         in_channels: int,
         out_channels: int,
-        kernel_size: Union[Tuple[int],int],
-        padding: Union[Tuple[int],int] = None,
+        kernel_size: Union[Tuple[int], int],
+        padding: Union[Tuple[int], int] = None,
         bias: bool = True,
         eps: float = 0.00001,
         pre_fft: bool = True,
@@ -353,8 +353,8 @@ def __init__(
         self,
         in_channels: int,
         out_channels: int,
-        kernel_size: Union[Tuple[int],int],
-        padding: Union[Tuple[int],int] = None,
+        kernel_size: Union[Tuple[int], int],
+        padding: Union[Tuple[int], int] = None,
         bias: bool = True,
         eps: float = 0.00001,
         pre_fft: bool = True,
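Every hunk above is the same mechanical reformat: black inserting a space in the Union[Tuple[int], int] annotations. For orientation, a sketch of how one of these layers might be instantiated; the class name FourierConv2d and the import path are assumptions, since the class names are collapsed out of the hunks:

    from lightorch.nn.fourier import FourierConv2d  # assumed name and path

    layer = FourierConv2d(
        in_channels=3,
        out_channels=16,
        kernel_size=(3, 3),  # Union[Tuple[int], int]: per-dimension tuple or single int
        padding=(1, 1),
        pre_fft=True,        # presumably FFTs the input before the spectral product
    )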
18 changes: 10 additions & 8 deletions lightorch/nn/functional.py

@@ -7,7 +7,9 @@
 from .utils import FeatureExtractor2D
 
 
-def _fourierconvNd(n: int, x: Tensor, weight: Tensor, bias: Union[Tensor,None]) -> Tensor:
+def _fourierconvNd(
+    n: int, x: Tensor, weight: Tensor, bias: Union[Tensor, None]
+) -> Tensor:
     # To fourier space
     weight = fftn(weight, dim=[-i for i in range(1, n + 1)])
 
@@ -22,7 +24,7 @@ def _fourierconvNd(n: int, x: Tensor, weight: Tensor, bias: Union[Tensor,None])
 
 
 def _fourierdeconvNd(
-    n: int, x: Tensor, weight: Tensor, bias: Union[Tensor,None], eps: float = 1e-5
+    n: int, x: Tensor, weight: Tensor, bias: Union[Tensor, None], eps: float = 1e-5
 ) -> Tensor:
     # To fourier space
     weight = fftn(weight, dim=[-i for i in range(1, n + 1)])
@@ -37,7 +39,7 @@ def _fourierdeconvNd(
     return x
 
 
-def fourierconv3d(x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor,None]):
+def fourierconv3d(x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor, None]):
     """
     x (Tensor): batch size, channels, height, width
     weight (Tensor): out channels, *kernel_size
@@ -74,7 +76,7 @@ def fourierconv3d(x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor,None])
     return out
 
 
-def fourierconv2d(x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor,None]):
+def fourierconv2d(x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor, None]):
     """
     x (Tensor): batch size, channels, height, width
     weight (Tensor): out channels, *kernel_size
@@ -107,7 +109,7 @@ def fourierconv2d(x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor,None])
     return out
 
 
-def fourierconv1d(x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor,None]):
+def fourierconv1d(x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor, None]):
     """
     x (Tensor): batch size, channels, sequence length
     weight (Tensor): out channels, kernel_size
@@ -130,7 +132,7 @@ def fourierconv1d(x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor,None])
 
 
 def fourierdeconv3d(
-    x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor,None], eps: float = 1e-5
+    x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor, None], eps: float = 1e-5
 ):
     """
     x (Tensor): batch size, channels, height, width
@@ -169,7 +171,7 @@ def fourierdeconv3d(
 
 
 def fourierdeconv2d(
-    x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor,None], eps: float = 1e-5
+    x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor, None], eps: float = 1e-5
 ):
     """
     x (Tensor): batch size, channels, height, width
@@ -204,7 +206,7 @@ def fourierdeconv2d(
 
 
 def fourierdeconv1d(
-    x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor,None], eps: float = 1e-5
+    x: Tensor, one: Tensor, weight: Tensor, bias: Union[Tensor, None], eps: float = 1e-5
 ):
     """
     x (Tensor): batch size, channels, sequence length
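These helpers rest on the convolution theorem: a pointwise product of spectra is circular convolution in signal space, and (presumably, given the eps argument of the deconv variants) dividing spectra inverts it. A minimal 1-D sketch of the idea, independent of the library's code:

    import torch
    from torch.fft import fftn, ifftn

    x = torch.randn(8, 64)  # 8 signals of length 64
    w = torch.randn(64)     # kernel already padded to the signal length

    # Convolution theorem: pointwise product in frequency space
    y = ifftn(fftn(x, dim=[-1]) * fftn(w, dim=[-1]), dim=[-1]).real

    # Deconvolution: divide by the kernel spectrum; eps guards small denominators
    x_rec = ifftn(fftn(y, dim=[-1]) / (fftn(w, dim=[-1]) + 1e-5), dim=[-1]).real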
73 changes: 43 additions & 30 deletions lightorch/nn/sequential/residual.py

@@ -2,16 +2,21 @@
 from typing import Union, Tuple, Callable, Any, Union
 from ..functional import residual_connection
 
+
 class _Residual(nn.Module):
-    def __init__(self, module: Union[nn.Module, Callable[[int, int], nn.Module]], n_layers: int):
+    def __init__(
+        self, module: Union[nn.Module, Callable[[int, int], nn.Module]], n_layers: int
+    ):
         super().__init__()
         self.model = nn.ModuleList([module for _ in range(n_layers)])
 
     def forward(self, x: Tensor) -> Tensor:
         for layer in self.model:
             x, _ = residual_connection(x, lambda x: layer(x))
+
         return x
 
+
 class LSTM(_Residual):
     def __init__(
         self,
@@ -21,25 +26,29 @@ def __init__(
         res_layers: int,
         bias: bool = True,
         batch_first: bool = True,
-        dropout: float = 0.,
+        dropout: float = 0.0,
         bidirectional: bool = False,
         proj_size: int = 0,
         device: Union[Any, None] = None,
         dtype: Union[Any, None] = None,
     ) -> None:
-        super().__init__(nn.LSTM(
-            input_size,
-            hidden_size,
-            lstm_layers,
-            bias,
-            batch_first,
-            dropout,
-            bidirectional,
-            proj_size,
-            device,
-            dtype
-        ), res_layers)
+        super().__init__(
+            nn.LSTM(
+                input_size,
+                hidden_size,
+                lstm_layers,
+                bias,
+                batch_first,
+                dropout,
+                bidirectional,
+                proj_size,
+                device,
+                dtype,
+            ),
+            res_layers,
+        )
 
+
 class GRU(_Residual):
     def __init__(
         self,
@@ -49,21 +58,25 @@ def __init__(
         res_layers: int,
         bias: bool = True,
         batch_first: bool = True,
-        dropout: float = 0.,
+        dropout: float = 0.0,
         bidirectional: bool = False,
         device: Union[Any, None] = None,
         dtype: Union[Any, None] = None,
     ) -> None:
-        super().__init__(nn.GRU(
-            input_size,
-            hidden_size,
-            gru_layers,
-            bias,
-            batch_first,
-            dropout,
-            bidirectional,
-            device,
-            dtype,
-        ), res_layers)
+        super().__init__(
+            nn.GRU(
+                input_size,
+                hidden_size,
+                gru_layers,
+                bias,
+                batch_first,
+                dropout,
+                bidirectional,
+                device,
+                dtype,
+            ),
+            res_layers,
+        )
 
-__all__ = ['LSTM', 'GRU']
+
+__all__ = ["LSTM", "GRU"]
2 changes: 1 addition & 1 deletion lightorch/nn/transformer/__init__.py

@@ -2,4 +2,4 @@
 from .embedding import *
 from .positional import *
 from .ffn import *
-from .transformer import *
\ No newline at end of file
+from .transformer import *