# Coverage for biobb_pytorch/test/unitests/test_mdae/test_specialized_models.py: 91%
# 85 statements — coverage.py v7.13.2, created at 2026-02-02 16:33 +0000

# type: ignore
"""
Test suite for specialized model types that require specific configurations.

This includes:
- GaussianMixtureVariationalAutoEncoder (GMVAE): requires dict-based encoder/decoder config
- SPIB: requires k parameter and specific configuration
- CNNAutoEncoder (MoLearn): requires different architecture

These models cannot be tested with the standard list-based encoder/decoder layer
configuration and require specialized setups.
"""
import tempfile
from pathlib import Path

import pytest
import torch
from biobb_common.tools import test_fixtures as fx

from biobb_pytorch.mdae.build_model import buildModel, BuildModel

class TestGMVAE:
    """Test suite for GaussianMixtureVariationalAutoEncoder."""

    def setup_class(self):
        """Set up the shared buildModel test fixtures (paths and properties)."""
        fx.test_setup(self, 'buildModel')

    def teardown_class(self):
        """Remove the sandbox created by the fixture setup."""
        fx.test_teardown(self)

    def test_build_gmvae_with_proper_config(self):
        """Test building GMVAE with proper dictionary-based encoder/decoder configuration."""
        props = self.properties.copy()
        # .copy() is shallow: re-copy the nested 'options' dict too, so the
        # mutations below do not leak into the shared self.properties fixture.
        props['options'] = dict(props['options'])
        props['model_type'] = 'GaussianMixtureVariationalAutoEncoder'

        # GMVAE requires dictionary-based encoder/decoder layers with specific
        # keys
        props['encoder_layers'] = {
            'qy_dims': [32, 16],  # Cluster assignment network
            'qz_dims': [32, 16]   # Latent variable network
        }
        props['decoder_layers'] = {
            'pz_dims': [16, 32],  # Latent prior network
            'px_dims': [16, 32]   # Reconstruction network
        }

        # GMVAE also requires 'k' (number of clusters) in options
        props['options']['k'] = 3
        props['options']['encoder'] = {
            'qy_nn': {},
            'qz_nn': {}
        }
        props['options']['decoder'] = {
            'pz_nn': {},
            'px_nn': {}
        }

        # delete=False so the (closed) file can be re-opened for writing by
        # buildModel; we remove it ourselves in the finally block below.
        with tempfile.NamedTemporaryFile(suffix='.pth', delete=False) as tmp:
            tmp_path = tmp.name

        try:
            # This should work with proper configuration
            buildModel(
                properties=props,
                input_stats_pt_path=self.paths['input_stats_pt_path'],
                output_model_pth_path=tmp_path
            )

            assert Path(tmp_path).exists(
            ), "GMVAE model file should be created"

            model = torch.load(tmp_path, weights_only=False)
            assert model.__class__.__name__ == 'GaussianMixtureVariationalAutoEncoder'
            assert hasattr(model, 'encoder'), "GMVAE should have encoder"
            assert hasattr(model, 'decoder'), "GMVAE should have decoder"
            assert model.k == 3, "GMVAE should have k=3"

        finally:
            if Path(tmp_path).exists():
                Path(tmp_path).unlink()

class TestSPIB:
    """Test suite for SPIB model."""

    def setup_class(self):
        """Set up the shared buildModel test fixtures (paths and properties)."""
        fx.test_setup(self, 'buildModel')

    def teardown_class(self):
        """Remove the sandbox created by the fixture setup."""
        fx.test_teardown(self)

    def test_build_spib_with_proper_config(self):
        """Test building SPIB with list-based configuration and k parameter."""
        props = self.properties.copy()
        # .copy() is shallow: re-copy the nested 'options' dict so setting 'k'
        # below does not leak into the shared self.properties fixture.
        props['options'] = dict(props['options'])
        props['model_type'] = 'SPIB'

        # SPIB uses list-based layers but requires 'k' in options
        props['encoder_layers'] = [32, 16]
        props['decoder_layers'] = [16, 32]
        props['options']['k'] = 2  # Number of states

        # delete=False so the (closed) file can be re-opened for writing by
        # buildModel; we remove it ourselves in the finally block below.
        with tempfile.NamedTemporaryFile(suffix='.pth', delete=False) as tmp:
            tmp_path = tmp.name

        try:
            buildModel(
                properties=props,
                input_stats_pt_path=self.paths['input_stats_pt_path'],
                output_model_pth_path=tmp_path
            )

            assert Path(tmp_path).exists(), "SPIB model file should be created"

            model = torch.load(tmp_path, weights_only=False)
            assert model.__class__.__name__ == 'SPIB'
            assert hasattr(model, 'encoder'), "SPIB should have encoder"
            assert hasattr(model, 'decoder'), "SPIB should have decoder"
            assert model.k == 2, "SPIB should have k=2"

        finally:
            if Path(tmp_path).exists():
                Path(tmp_path).unlink()

    def test_spib_forward_pass(self):
        """Test SPIB forward pass."""
        props = self.properties.copy()
        # Shallow copy again: isolate the nested 'options' dict (see above).
        props['options'] = dict(props['options'])
        props['model_type'] = 'SPIB'
        props['encoder_layers'] = [24, 12]
        props['decoder_layers'] = [12, 24]
        props['options']['k'] = 2

        instance = BuildModel(
            input_stats_pt_path=self.paths['input_stats_pt_path'],
            output_model_pth_path=None,
            properties=props
        )

        model = instance.model
        stats = torch.load(
            self.paths['input_stats_pt_path'],
            weights_only=False)
        n_features = stats['shape'][1]

        batch_size = 4
        dummy_input = torch.randn(batch_size, n_features)

        model.eval()
        with torch.no_grad():
            # Let any exception propagate instead of catching it and calling
            # pytest.fail(str(e)): pytest already reports the failure with a
            # full traceback, which is far more useful for debugging.
            output = model(dummy_input)
            # SPIB should return tensor or dict with latent representation
            assert output is not None, "SPIB should produce output"

class TestCNNAutoEncoder:
    """Test suite for CNNAutoEncoder (MoLearn) model."""

    def setup_class(self):
        """Set up the shared buildModel test fixtures (paths and properties)."""
        fx.test_setup(self, 'buildModel')

    def teardown_class(self):
        """Remove the sandbox created by the fixture setup."""
        fx.test_teardown(self)

    @pytest.mark.skip(reason="CNNAutoEncoder requires specialized 3D input configuration")
    def test_build_cnn_autoencoder(self):
        """
        Test building CNNAutoEncoder.

        Note: CNNAutoEncoder (from MoLearn) is designed for 3D molecular structures
        and requires a completely different input format than other models.
        It expects 3D coordinates as input, not feature vectors.
        """
        props = self.properties.copy()
        props['model_type'] = 'CNNAutoEncoder'
        props['n_cvs'] = 2

        # CNNAutoEncoder uses different architecture
        # This test is skipped because it needs specialized setup
        pass

# Summary comment for documentation
"""
Model Testing Summary
=====================

Testable with standard configuration (test_all_models.py):
- AutoEncoder: Standard feedforward architecture ✓
- VariationalAutoEncoder: Variational architecture with mean/var outputs ✓

Testable with specialized configuration (this file):
- GaussianMixtureVariationalAutoEncoder: Requires dict-based encoder/decoder ✓
- SPIB: Requires k parameter for number of states ✓

Requires extensive specialized setup:
- CNNAutoEncoder: Designed for 3D molecular coordinates, not feature vectors
  (from MoLearn framework, uses graph-based architecture)

To run all model tests:
    pytest biobb_pytorch/test/unitests/test_mdae/test_all_models.py -v
    pytest biobb_pytorch/test/unitests/test_mdae/test_specialized_models.py -v
"""