Coverage for biobb_pytorch / test / unitests / test_mdae / test_specialized_models.py: 91%
85 statements
« prev ^ index » next coverage.py v7.13.2, created at 2026-02-02 16:33 +0000
1# type: ignore
2"""
3Test suite for specialized model types that require specific configurations.
5This includes:
6- GaussianMixtureVariationalAutoEncoder (GMVAE): requires dict-based encoder/decoder config
7- SPIB: requires k parameter and specific configuration
8- CNNAutoEncoder (MoLearn): requires different architecture
10These models cannot be tested with the standard list-based encoder/decoder layer
11configuration and require specialized setups.
12"""
import copy
import tempfile
from pathlib import Path

import pytest
import torch

from biobb_common.tools import test_fixtures as fx
from biobb_pytorch.mdae.build_model import buildModel, BuildModel
class TestGMVAE:
    """Test suite for GaussianMixtureVariationalAutoEncoder."""

    def setup_class(self):
        """Setup test fixtures."""
        fx.test_setup(self, 'buildModel')

    def teardown_class(self):
        """Cleanup after tests."""
        fx.test_teardown(self)

    def test_build_gmvae_with_proper_config(self):
        """Test building GMVAE with proper dictionary-based encoder/decoder configuration."""
        # Use deepcopy rather than .copy(): dict.copy() is shallow, so the
        # nested 'options' dict would still be shared with self.properties
        # and the mutations below ('k', 'encoder', 'decoder') would leak
        # into every other test using the same fixture.
        props = copy.deepcopy(self.properties)
        props['model_type'] = 'GaussianMixtureVariationalAutoEncoder'

        # GMVAE requires dictionary-based encoder/decoder layers with specific
        # keys
        props['encoder_layers'] = {
            'qy_dims': [32, 16],  # Cluster assignment network
            'qz_dims': [32, 16]   # Latent variable network
        }
        props['decoder_layers'] = {
            'pz_dims': [16, 32],  # Latent prior network
            'px_dims': [16, 32]   # Reconstruction network
        }

        # GMVAE also requires 'k' (number of clusters) in options
        props['options']['k'] = 3
        props['options']['encoder'] = {
            'qy_nn': {},
            'qz_nn': {}
        }
        props['options']['decoder'] = {
            'pz_nn': {},
            'px_nn': {}
        }

        # Only reserve a temp file name; buildModel writes the file itself.
        with tempfile.NamedTemporaryFile(suffix='.pth', delete=False) as tmp:
            tmp_path = tmp.name

        try:
            # This should work with proper configuration
            buildModel(
                properties=props,
                input_stats_pt_path=self.paths['input_stats_pt_path'],
                output_model_pth_path=tmp_path
            )

            assert Path(tmp_path).exists(
            ), "GMVAE model file should be created"

            model = torch.load(tmp_path, weights_only=False)
            assert model.__class__.__name__ == 'GaussianMixtureVariationalAutoEncoder'
            assert hasattr(model, 'encoder'), "GMVAE should have encoder"
            assert hasattr(model, 'decoder'), "GMVAE should have decoder"
            assert model.k == 3, "GMVAE should have k=3"

        finally:
            # Always remove the temp model file, even on assertion failure.
            if Path(tmp_path).exists():
                Path(tmp_path).unlink()
class TestSPIB:
    """Test suite for SPIB model."""

    def setup_class(self):
        """Setup test fixtures."""
        fx.test_setup(self, 'buildModel')

    def teardown_class(self):
        """Cleanup after tests."""
        fx.test_teardown(self)

    def test_build_spib_with_proper_config(self):
        """Test building SPIB with list-based configuration and k parameter."""
        # Use deepcopy rather than .copy(): a shallow copy shares the nested
        # 'options' dict, so props['options']['k'] = 2 below would mutate
        # self.properties and contaminate other tests in the session.
        props = copy.deepcopy(self.properties)
        props['model_type'] = 'SPIB'

        # SPIB uses list-based layers but requires 'k' in options
        props['encoder_layers'] = [32, 16]
        props['decoder_layers'] = [16, 32]
        props['options']['k'] = 2  # Number of states

        # Only reserve a temp file name; buildModel writes the file itself.
        with tempfile.NamedTemporaryFile(suffix='.pth', delete=False) as tmp:
            tmp_path = tmp.name

        try:
            buildModel(
                properties=props,
                input_stats_pt_path=self.paths['input_stats_pt_path'],
                output_model_pth_path=tmp_path
            )

            assert Path(tmp_path).exists(), "SPIB model file should be created"

            model = torch.load(tmp_path, weights_only=False)
            assert model.__class__.__name__ == 'SPIB'
            assert hasattr(model, 'encoder'), "SPIB should have encoder"
            assert hasattr(model, 'decoder'), "SPIB should have decoder"
            assert model.k == 2, "SPIB should have k=2"

        finally:
            # Always remove the temp model file, even on assertion failure.
            if Path(tmp_path).exists():
                Path(tmp_path).unlink()

    def test_spib_forward_pass(self):
        """Test SPIB forward pass."""
        # deepcopy for the same reason as above: the nested 'options' dict
        # is mutated and must not be shared with the fixture.
        props = copy.deepcopy(self.properties)
        props['model_type'] = 'SPIB'
        props['encoder_layers'] = [24, 12]
        props['decoder_layers'] = [12, 24]
        props['options']['k'] = 2

        instance = BuildModel(
            input_stats_pt_path=self.paths['input_stats_pt_path'],
            output_model_pth_path=None,
            properties=props
        )

        model = instance.model
        stats = torch.load(
            self.paths['input_stats_pt_path'],
            weights_only=False)
        # stats['shape'] is (n_samples, n_features) per the fixture stats file.
        n_features = stats['shape'][1]

        batch_size = 4
        dummy_input = torch.randn(batch_size, n_features)

        model.eval()
        with torch.no_grad():
            try:
                output = model(dummy_input)
                # SPIB should return tensor or dict with latent representation
                assert output is not None, "SPIB should produce output"
            except Exception as e:
                pytest.fail(f"SPIB forward pass failed: {str(e)}")
class TestCNNAutoEncoder:
    """Tests for the MoLearn-derived CNNAutoEncoder model."""

    def setup_class(self):
        """Prepare the shared buildModel fixtures."""
        fx.test_setup(self, 'buildModel')

    def teardown_class(self):
        """Remove fixture artifacts created during setup."""
        fx.test_teardown(self)

    @pytest.mark.skip(reason="CNNAutoEncoder requires specialized 3D input configuration")
    def test_build_cnn_autoencoder(self):
        """
        Placeholder for building a CNNAutoEncoder.

        CNNAutoEncoder (from MoLearn) consumes 3D molecular coordinates
        rather than flat feature vectors, so the standard fixture input does
        not apply. The test stays skipped until a dedicated 3D setup exists.
        """
        properties = self.properties.copy()
        properties['model_type'] = 'CNNAutoEncoder'
        properties['n_cvs'] = 2

        # Intentionally a no-op: the specialized architecture needs its own
        # input pipeline before this can be exercised.
        pass
# Module-level summary: a bare string expression (not assigned) that serves
# purely as inline documentation of which model types are covered where.
"""
Model Testing Summary
=====================

Testable with standard configuration (test_all_models.py):
- AutoEncoder: Standard feedforward architecture ✓
- VariationalAutoEncoder: Variational architecture with mean/var outputs ✓

Testable with specialized configuration (this file):
- GaussianMixtureVariationalAutoEncoder: Requires dict-based encoder/decoder ✓
- SPIB: Requires k parameter for number of states ✓

Requires extensive specialized setup:
- CNNAutoEncoder: Designed for 3D molecular coordinates, not feature vectors
  (from MoLearn framework, uses graph-based architecture)

To run all model tests:
    pytest biobb_pytorch/test/unitests/test_mdae/test_all_models.py -v
    pytest biobb_pytorch/test/unitests/test_mdae/test_specialized_models.py -v
"""