#!/usr/bin/env python3

"""Module containing the RecurrentNeuralNetwork class and the command line interface."""
import argparse
import h5py
import json
import numpy as np
import pandas as pd
from biobb_common.generic.biobb_object import BiobbObject
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import EarlyStopping
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.neural_networks.common import check_input_path, check_output_path, getHeader, getTargetValue, split_sequence, plotResultsReg


class RecurrentNeuralNetwork(BiobbObject):
    """
    | biobb_ml RecurrentNeuralNetwork
    | Wrapper of the TensorFlow Keras LSTM method using Recurrent Neural Networks.
    | Trains and tests a given dataset and saves the complete model for a Recurrent Neural Network. Visit the `LSTM documentation page <https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM>`_ on the official TensorFlow Keras website for further information.

    Args:
        input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/neural_networks/dataset_recurrent.csv>`_. Accepted formats: csv (edam:format_3752).
        output_model_path (str): Path to the output model file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/neural_networks/ref_output_model_recurrent.h5>`_. Accepted formats: h5 (edam:format_3590).
        output_test_table_path (str) (Optional): Path to the test table file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/neural_networks/ref_output_test_recurrent.csv>`_. Accepted formats: csv (edam:format_3752).
        output_plot_path (str) (Optional): Loss, accuracy and MSE plots. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/neural_networks/ref_output_plot_recurrent.png>`_. Accepted formats: png (edam:format_3603).
        properties (dict - Python dictionary object containing the tool parameters, not input/output files):
            * **target** (*dict*) - ({}) Dependent variable you want to predict from your dataset. You can specify either a column name or a column index. Formats: { "column": "column3" } or { "index": 21 }. If both formats are provided, the first one will be picked.
            * **validation_size** (*float*) - (0.1) [0~1|0.05] Represents the proportion of the dataset to include in the validation split. It should be between 0.0 and 1.0.
            * **window_size** (*int*) - (5) [0~100|1] Number of time steps in each window used to train the model.
            * **test_size** (*int*) - (5) [0~100000|1] Represents the number of samples of the dataset to include in the test split.
            * **hidden_layers** (*list*) - (None) List of dictionaries with hidden layer values. Format: [ { 'size': 50, 'activation': 'relu' } ].
            * **optimizer** (*str*) - ("Adam") Name of the optimizer instance. Values: Adadelta (Adadelta optimization is a stochastic gradient descent method that is based on an adaptive learning rate per dimension to address two drawbacks: the continual decay of learning rates throughout training and the need for a manually selected global learning rate), Adagrad (Adagrad is an optimizer with parameter-specific learning rates; which are adapted relative to how frequently a parameter gets updated during training. The more updates a parameter receives; the smaller the updates), Adam (Adam optimization is a stochastic gradient descent method that is based on adaptive estimation of first-order and second-order moments), Adamax (It is a variant of Adam based on the infinity norm. Default parameters follow those provided in the paper. Adamax is sometimes superior to Adam; especially in models with embeddings), Ftrl (Optimizer that implements the FTRL algorithm), Nadam (Much like Adam is essentially RMSprop with momentum; Nadam is Adam with Nesterov momentum), RMSprop (Optimizer that implements the RMSprop algorithm), SGD (Gradient descent -with momentum- optimizer).
            * **learning_rate** (*float*) - (0.02) [0~100|0.01] Determines the step size at each iteration while moving toward a minimum of the loss function.
            * **batch_size** (*int*) - (100) [0~1000|1] Number of samples per gradient update.
            * **max_epochs** (*int*) - (100) [0~1000|1] Number of epochs to train the model. As early stopping is enabled, this is a maximum.
            * **normalize_cm** (*bool*) - (False) Whether or not to normalize the confusion matrix.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.

    Examples:
        This is a use example of how to use the building block from Python::

            from biobb_ml.neural_networks.recurrent_neural_network import recurrent_neural_network
            prop = {
                'target': {
                    'column': 'target'
                },
                'window_size': 5,
                'validation_size': 0.2,
                'test_size': 5,
                'hidden_layers': [
                    {
                        'size': 10,
                        'activation': 'relu'
                    },
                    {
                        'size': 8,
                        'activation': 'relu'
                    }
                ],
                'optimizer': 'Adam',
                'learning_rate': 0.01,
                'batch_size': 32,
                'max_epochs': 150
            }
            recurrent_neural_network(input_dataset_path='/path/to/myDataset.csv',
                                     output_model_path='/path/to/newModel.h5',
                                     output_test_table_path='/path/to/newTable.csv',
                                     output_plot_path='/path/to/newPlot.png',
                                     properties=prop)

    Info:
        * wrapped_software:
            * name: TensorFlow Keras LSTM
            * version: >2.1.0
            * license: MIT
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl

    """

88 

89 def __init__(self, input_dataset_path, output_model_path, 

90 output_test_table_path=None, output_plot_path=None, properties=None, **kwargs) -> None: 

91 properties = properties or {} 

92 

93 # Call parent class constructor 

94 super().__init__(properties) 

95 self.locals_var_dict = locals().copy() 

96 

97 # Input/Output files 

98 self.io_dict = { 

99 "in": {"input_dataset_path": input_dataset_path}, 

100 "out": {"output_model_path": output_model_path, "output_test_table_path": output_test_table_path, "output_plot_path": output_plot_path} 

101 } 

102 

103 # Properties specific for BB 

104 self.target = properties.get('target', '') 

105 self.validation_size = properties.get('validation_size', 0.1) 

106 self.window_size = properties.get('window_size', 5) 

107 self.test_size = properties.get('test_size', 5) 

108 self.hidden_layers = properties.get('hidden_layers', []) 

109 self.optimizer = properties.get('optimizer', 'Adam') 

110 self.learning_rate = properties.get('learning_rate', 0.02) 

111 self.batch_size = properties.get('batch_size', 100) 

112 self.max_epochs = properties.get('max_epochs', 100) 

113 self.normalize_cm = properties.get('normalize_cm', False) 

114 self.properties = properties 

115 

116 # Check the properties 

117 self.check_properties(properties) 

118 self.check_arguments() 

119 

    def check_data_params(self, out_log, err_log):
        """ Checks all the input/output paths and parameters """
        self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", False, out_log, self.__class__.__name__)
        self.io_dict["out"]["output_model_path"] = check_output_path(self.io_dict["out"]["output_model_path"], "output_model_path", False, out_log, self.__class__.__name__)
        self.io_dict["out"]["output_test_table_path"] = check_output_path(self.io_dict["out"]["output_test_table_path"], "output_test_table_path", True, out_log, self.__class__.__name__)
        self.io_dict["out"]["output_plot_path"] = check_output_path(self.io_dict["out"]["output_plot_path"], "output_plot_path", True, out_log, self.__class__.__name__)

    def build_model(self, input_shape):
        """ Builds the neural network according to the hidden_layers property """

        # create model
        model = Sequential([])

        # if no hidden_layers provided, manually create a hidden layer with default values
        if not self.hidden_layers:
            self.hidden_layers = [{'size': 50, 'activation': 'relu'}]

        # generate hidden layers
        for i, layer in enumerate(self.hidden_layers):
            if i == 0:
                model.add(LSTM(layer['size'], activation=layer['activation'], kernel_initializer='he_normal', input_shape=input_shape))  # 1st hidden layer
            else:
                model.add(Dense(layer['size'], activation=layer['activation'], kernel_initializer='he_normal'))

        model.add(Dense(1))  # output layer
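
        # Sketch of the resulting topology: only the first entry in
        # hidden_layers becomes an LSTM layer; every later entry is a plain
        # Dense layer. With the default [{'size': 50, 'activation': 'relu'}]
        # and input_shape=(5, 1), the stack is:
        #   LSTM(50, activation='relu')  <- consumes windows of 5 time steps
        #   Dense(1)                     <- single regression output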

        return model

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`RecurrentNeuralNetwork <neural_networks.recurrent_neural_network.RecurrentNeuralNetwork>` object."""

        # check input/output paths and parameters
        self.check_data_params(self.out_log, self.err_log)

        # Setup Biobb
        if self.check_restart():
            return 0
        self.stage_files()

        # load dataset
        fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
        if 'column' in self.target:
            labels = getHeader(self.io_dict["in"]["input_dataset_path"])
            skiprows = 1
        else:
            labels = None
            skiprows = None
        data = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header=None, sep="\\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)
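        # The regex separator above lets pandas accept whitespace-, ';'-,
        # ':'-, ','- or tab-delimited files interchangeably; the "python"
        # engine is required for multi-character regex separators.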

        # get target column
        target = data[getTargetValue(self.target)].to_numpy()

        # split into samples
        X, y = split_sequence(target, self.window_size)
        # reshape into [samples, timesteps, features]
        X = X.reshape((X.shape[0], X.shape[1], 1))
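        # Illustration (split_sequence is assumed to emit sliding windows):
        # target = [1, 2, 3, 4, 5] with window_size=3 would yield
        # X = [[1, 2, 3], [2, 3, 4]] and y = [4, 5]; the reshape then turns
        # X from (2, 3) into (2, 3, 1), i.e. one feature per time step.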

        # train / test split
        fu.log('Creating train and test sets', self.out_log, self.global_log)
        X_train, X_test, y_train, y_test = X[:-self.test_size], X[-self.test_size:], y[:-self.test_size], y[-self.test_size:]

        # build model
        fu.log('Building model', self.out_log, self.global_log)
        model = self.build_model((X_train.shape[1], 1))

        # model summary
        stringlist = []
        model.summary(print_fn=lambda x: stringlist.append(x))
        model_summary = "\n".join(stringlist)
        fu.log('Model summary:\n\n%s\n' % model_summary, self.out_log, self.global_log)

        # get optimizer
        mod = __import__('tensorflow.keras.optimizers', fromlist=[self.optimizer])
        opt_class = getattr(mod, self.optimizer)
        opt = opt_class(learning_rate=self.learning_rate)
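        # The dynamic import above resolves the optimizer class by name; it is
        # equivalent to, e.g.:
        #   from tensorflow.keras.optimizers import Adam
        #   opt = Adam(learning_rate=0.02)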

        # compile model
        model.compile(optimizer=opt, loss='mse', metrics=['mse', 'mae'])

        # fitting
        fu.log('Training model', self.out_log, self.global_log)
        # set an early stopping mechanism
        # set patience=2, to be a bit tolerant against random validation loss increases
        early_stopping = EarlyStopping(patience=2)
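        # EarlyStopping monitors 'val_loss' by default, so training stops once
        # the validation loss fails to improve for two consecutive epochs.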

        # fit the model
        mf = model.fit(X_train,
                       y_train,
                       batch_size=self.batch_size,
                       epochs=self.max_epochs,
                       callbacks=[early_stopping],
                       validation_split=self.validation_size,
                       verbose=1)

        train_metrics = pd.DataFrame()
        train_metrics['metric'] = ['Train loss', 'Train MAE', 'Train MSE', 'Validation loss', 'Validation MAE', 'Validation MSE']
        train_metrics['coefficient'] = [mf.history['loss'][-1], mf.history['mae'][-1], mf.history['mse'][-1], mf.history['val_loss'][-1], mf.history['val_mae'][-1], mf.history['val_mse'][-1]]

        fu.log('Training metrics\n\nTRAINING METRICS TABLE\n\n%s\n' % train_metrics, self.out_log, self.global_log)

        # testing
        fu.log('Testing model', self.out_log, self.global_log)
        test_loss, test_mse, test_mae = model.evaluate(X_test, y_test)

        # predict data from X_test
        test_predictions = model.predict(X_test)
        test_predictions = np.around(test_predictions, decimals=2)
        tpr = np.squeeze(np.asarray(test_predictions))

        test_metrics = pd.DataFrame()
        test_metrics['metric'] = ['Test loss', 'Test MAE', 'Test MSE']
        test_metrics['coefficient'] = [test_loss, test_mae, test_mse]

        fu.log('Testing metrics\n\nTESTING METRICS TABLE\n\n%s\n' % test_metrics, self.out_log, self.global_log)

        test_table = pd.DataFrame()
        test_table['prediction'] = tpr
        test_table['target'] = y_test
        test_table['residual'] = test_table['target'] - test_table['prediction']
        test_table['difference %'] = np.absolute(test_table['residual']/test_table['target']*100)
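        # 'difference %' is the absolute percentage error of each prediction;
        # note it is undefined (inf) for rows whose target is exactly 0.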

        pd.set_option('display.float_format', lambda x: '%.2f' % x)
        # sort by difference in %
        test_table = test_table.sort_values(by=['difference %'])
        test_table = test_table.reset_index(drop=True)
        fu.log('TEST DATA\n\n%s\n' % test_table, self.out_log, self.global_log)

        # save test data
        if (self.io_dict["out"]["output_test_table_path"]):
            fu.log('Saving testing data to %s' % self.io_dict["out"]["output_test_table_path"], self.out_log, self.global_log)
            test_table.to_csv(self.io_dict["out"]["output_test_table_path"], index=False, header=True)

        # create test plot
        if (self.io_dict["out"]["output_plot_path"]):
            fu.log('Saving plot to %s' % self.io_dict["out"]["output_plot_path"], self.out_log, self.global_log)
            test_predictions = test_predictions.flatten()
            train_predictions = model.predict(X_train).flatten()
            plot = plotResultsReg(mf.history, y_test, test_predictions, y_train, train_predictions)
            plot.savefig(self.io_dict["out"]["output_plot_path"], dpi=150)

        # save model and parameters
        vars_obj = {
            'target': self.target,
            'window_size': self.window_size,
            'type': 'recurrent'
        }
        variables = json.dumps(vars_obj)
        fu.log('Saving model to %s' % self.io_dict["out"]["output_model_path"], self.out_log, self.global_log)
        with h5py.File(self.io_dict["out"]["output_model_path"], mode='w') as f:
            hdf5_format.save_model_to_hdf5(model, f)
            f.attrs['variables'] = variables
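
        # Read-back sketch (assuming the same TensorFlow version when loading,
        # with model_path standing in for the file written above):
        #   with h5py.File(model_path, mode='r') as f:
        #       variables = json.loads(f.attrs['variables'])
        #       model = hdf5_format.load_model_from_hdf5(f)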

        # Copy files to host
        self.copy_to_host()

        self.tmp_files.extend([
            self.stage_io_dict.get("unique_dir")
        ])
        self.remove_tmp_files()

        self.check_arguments(output_files_created=True, raise_exception=False)

        return 0


def recurrent_neural_network(input_dataset_path: str, output_model_path: str, output_test_table_path: str = None, output_plot_path: str = None, properties: dict = None, **kwargs) -> int:
    """Create the :class:`RecurrentNeuralNetwork <neural_networks.recurrent_neural_network.RecurrentNeuralNetwork>` class and
    execute its :meth:`launch() <neural_networks.recurrent_neural_network.RecurrentNeuralNetwork.launch>` method."""

    return RecurrentNeuralNetwork(input_dataset_path=input_dataset_path,
                                  output_model_path=output_model_path,
                                  output_test_table_path=output_test_table_path,
                                  output_plot_path=output_plot_path,
                                  properties=properties, **kwargs).launch()


def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(description="Wrapper of the TensorFlow Keras LSTM method.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('--config', required=False, help='Configuration file')

    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')
    required_args.add_argument('--output_model_path', required=True, help='Path to the output model file. Accepted formats: h5.')
    parser.add_argument('--output_test_table_path', required=False, help='Path to the test table file. Accepted formats: csv.')
    parser.add_argument('--output_plot_path', required=False, help='Loss, accuracy and MSE plots. Accepted formats: png.')

    args = parser.parse_args()
    args.config = args.config or "{}"
    properties = settings.ConfReader(config=args.config).get_prop_dic()

    # Specific call of each building block
    recurrent_neural_network(input_dataset_path=args.input_dataset_path,
                             output_model_path=args.output_model_path,
                             output_test_table_path=args.output_test_table_path,
                             output_plot_path=args.output_plot_path,
                             properties=properties)


if __name__ == '__main__':
    main()