torchcnnbuilder.latent
from math import prod
from typing import Optional, Sequence

import torch
import torch.nn as nn

from torchcnnbuilder._validation import (
    _validate_latent_layers,
    _validate_latent_shape,
    _validate_warning_huge_linear_weights_matrix,
)


# ------------------------------------
# Linear LatentSpaceModule class
# ------------------------------------
class LatentSpaceModule(nn.Module):
    """
    Module for transforming a tensor from one latent space shape to another using linear fully connected layers.
    """

    def __init__(
        self,
        input_shape: Sequence[int],
        output_shape: Sequence[int],
        n_layers: int = 1,
        activation_function: Optional[nn.Module] = None,
    ):
        """
        This module reshapes an input tensor into a specified output shape, with an optional sequence of
        fully connected layers and activation functions to modulate the transformation.

        Args:
            input_shape (Sequence[int]): Shape of the input tensor.
            output_shape (Sequence[int]): Desired shape of the output tensor.
            n_layers (int, optional): Number of linear layers to use in the transformation. Defaults to 1.
            activation_function (Optional[nn.Module], optional): Activation function to apply after each layer.
                If None, no activation is applied. Defaults to None.

        Raises:
            ValueError: If `input_shape`, `output_shape`, or `n_layers` are invalid.
        """

        super().__init__()
        _validate_latent_shape(input_shape)
        _validate_latent_shape(output_shape)
        _validate_latent_layers(n_layers)

        self._input_shape = input_shape
        self._output_shape = output_shape
        self._n_layers = n_layers
        self._activation_function = activation_function

        input_features = prod(self._input_shape)
        output_features = prod(self._output_shape)

        flatten_layer = nn.Flatten()
        unflatten_layer = nn.Unflatten(1, self._output_shape)

        if n_layers > 1:
            log_input = torch.log(torch.tensor(input_features, dtype=torch.int))
            log_output = torch.log(torch.tensor(output_features, dtype=torch.int))
            features = torch.exp(torch.linspace(log_input, log_output, steps=n_layers + 1)).tolist()
            features = list(map(int, features))
        else:
            features = [input_features, output_features]

        latent_layers = [flatten_layer]
        for i in range(self._n_layers):
            in_features, out_features = features[i], features[i + 1]

            _validate_warning_huge_linear_weights_matrix(
                in_features, out_features, level=f"linear latent layer number {i}"
            )

            latent_layers.append(nn.Linear(in_features, out_features))
            if activation_function is not None:
                latent_layers.append(activation_function)

        latent_layers.append(unflatten_layer)
        self._resize = nn.Sequential(*latent_layers)

    @property
    def input_shape(self) -> Sequence[int]:
        """
        Returns the shape of the input tensor.

        Returns:
            Shape of the input tensor.
        """
        return self._input_shape

    @property
    def output_shape(self) -> Sequence[int]:
        """
        Returns the shape of the output tensor.

        Returns:
            Shape of the output tensor.
        """
        return self._output_shape

    def forward(self, x):
        """
        Performs a forward pass through the module, transforming the input tensor.

        Args:
            x (torch.Tensor): Input tensor to be transformed.

        Returns:
            Transformed output tensor with the specified output shape.
        """
        return self._resize(x)

    def __repr__(self):
        """
        Custom string representation of the module
        """
        default_repr_model_params = [f"input_shape={self._input_shape}", f"output_shape={self._output_shape}"]

        if self._n_layers != 1:
            default_repr_model_params.append(f"n_layers={self._n_layers}")

        if self._activation_function is not None:
            default_repr_model_params.append(f"activation_function={self._activation_function}")

        return f"LatentSpaceModule({', '.join(default_repr_model_params)})"
class LatentSpaceModule(torch.nn.modules.module.Module):
Module for transforming a tensor from one latent space shape to another using linear fully connected layers.
LatentSpaceModule(
    input_shape: Sequence[int],
    output_shape: Sequence[int],
    n_layers: int = 1,
    activation_function: Optional[torch.nn.modules.module.Module] = None,
)
This module reshapes an input tensor into a specified output shape, with an optional sequence of fully connected layers and activation functions to modulate the transformation.
Arguments:
- input_shape (Sequence[int]): Shape of the input tensor.
- output_shape (Sequence[int]): Desired shape of the output tensor.
- n_layers (int, optional): Number of linear layers to use in the transformation. Defaults to 1.
- activation_function (Optional[nn.Module], optional): Activation function to apply after each layer. If None, no activation is applied. Defaults to None.
Raises:
- ValueError: If `input_shape`, `output_shape`, or `n_layers` are invalid.
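A minimal usage sketch; the shapes are illustrative examples, assuming the module's private validators accept them:

import torch
import torch.nn as nn
from torchcnnbuilder.latent import LatentSpaceModule

# Map a (64, 8, 8) latent tensor to a (16, 4, 4) one through two linear layers.
latent = LatentSpaceModule(
    input_shape=(64, 8, 8),
    output_shape=(16, 4, 4),
    n_layers=2,
    activation_function=nn.ReLU(),
)

x = torch.randn(32, 64, 8, 8)  # a leading batch dimension is required
y = latent(x)
print(y.shape)  # torch.Size([32, 16, 4, 4])

Note that when an `activation_function` is given, the same module instance is appended after every linear layer; this is fine for stateless activations such as nn.ReLU.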
input_shape: Sequence[int]
Returns the shape of the input tensor.
Returns:
Shape of the input tensor.
output_shape: Sequence[int]
Returns the shape of the output tensor.
Returns:
Shape of the output tensor.
def forward(self, x):
Performs a forward pass through the module, transforming the input tensor.
Arguments:
- x (torch.Tensor): Input tensor to be transformed.
Returns:
Transformed output tensor with the specified output shape.
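Because the internal pipeline begins with nn.Flatten() (default start_dim=1) and ends with nn.Unflatten(1, output_shape), forward expects a batched input of shape (N, *input_shape) and returns a tensor of shape (N, *output_shape). A short sketch with illustrative shapes:

import torch
from torchcnnbuilder.latent import LatentSpaceModule

latent = LatentSpaceModule(input_shape=(8, 8), output_shape=(4, 4))

x = torch.randn(16, 8, 8)     # (N, *input_shape); an unbatched input would fail,
y = latent(x)                 # since Flatten/Unflatten operate from dim 1
assert y.shape == (16, 4, 4)  # (N, *output_shape)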