#!/usr/bin/env python3

"""Module containing the RecurrentNeuralNetwork class and the command line interface."""
import argparse
import h5py
import json
import numpy as np
import pandas as pd
from biobb_common.generic.biobb_object import BiobbObject
from tensorflow.python.keras.saving import hdf5_format
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import EarlyStopping
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.neural_networks.common import check_input_path, check_output_path, getHeader, getTargetValue, split_sequence, plotResultsReg


class RecurrentNeuralNetwork(BiobbObject):
    """
    | biobb_ml RecurrentNeuralNetwork
    | Wrapper of the TensorFlow Keras LSTM method using Recurrent Neural Networks.
    | Trains and tests a given dataset and saves the complete model for a Recurrent Neural Network. Visit the `LSTM documentation page <https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM>`_ in the TensorFlow Keras official website for further information.

    Args:
        input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/neural_networks/dataset_recurrent.csv>`_. Accepted formats: csv (edam:format_3752).
        output_model_path (str): Path to the output model file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/neural_networks/ref_output_model_recurrent.h5>`_. Accepted formats: h5 (edam:format_3590).
        output_test_table_path (str) (Optional): Path to the test table file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/neural_networks/ref_output_test_recurrent.csv>`_. Accepted formats: csv (edam:format_3752).
        output_plot_path (str) (Optional): Loss, accuracy and MSE plots. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/neural_networks/ref_output_plot_recurrent.png>`_. Accepted formats: png (edam:format_3603).
        properties (dict - Python dictionary object containing the tool parameters, not input/output files):
            * **target** (*dict*) - ({}) Dependent variable you want to predict from your dataset. You can specify either a column name or a column index. Formats: { "column": "column3" } or { "index": 21 }. In case of multiple formats, the first one will be picked.
            * **validation_size** (*float*) - (0.2) [0~1|0.05] Represents the proportion of the dataset to include in the validation split. It should be between 0.0 and 1.0.
            * **window_size** (*int*) - (5) [0~100|1] Number of steps in each window used to train the model.
            * **test_size** (*int*) - (5) [0~100000|1] Represents the number of samples of the dataset to include in the test split.
            * **hidden_layers** (*list*) - (None) List of dictionaries with hidden layers values. Format: [ { 'size': 50, 'activation': 'relu' } ].
            * **optimizer** (*string*) - ("Adam") Name of optimizer instance. Values: Adadelta (Adadelta optimization is a stochastic gradient descent method that is based on adaptive learning rate per dimension to address two drawbacks: the continual decay of learning rates throughout training and the need for a manually selected global learning rate), Adagrad (Adagrad is an optimizer with parameter-specific learning rates; which are adapted relative to how frequently a parameter gets updated during training. The more updates a parameter receives; the smaller the updates), Adam (Adam optimization is a stochastic gradient descent method that is based on adaptive estimation of first-order and second-order moments), Adamax (It is a variant of Adam based on the infinity norm. Default parameters follow those provided in the paper. Adamax is sometimes superior to Adam; especially in models with embeddings), Ftrl (Optimizer that implements the FTRL algorithm), Nadam (Much like Adam is essentially RMSprop with momentum; Nadam is Adam with Nesterov momentum), RMSprop (Optimizer that implements the RMSprop algorithm), SGD (Gradient descent -with momentum- optimizer).
            * **learning_rate** (*float*) - (0.02) [0~100|0.01] Determines the step size at each iteration while moving toward a minimum of a loss function.
            * **batch_size** (*int*) - (100) [0~1000|1] Number of samples per gradient update.
            * **max_epochs** (*int*) - (100) [0~1000|1] Number of epochs to train the model. As early stopping is enabled, this is a maximum.
            * **normalize_cm** (*bool*) - (False) Whether or not to normalize the confusion matrix.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
            * **sandbox_path** (*str*) - ("./") [WF property] Parent path to the sandbox directory.

    Examples:
        This is a use example of how to use the building block from Python::

            from biobb_ml.neural_networks.recurrent_neural_network import recurrent_neural_network
            prop = {
                'target': {
                    'column': 'target'
                },
                'window_size': 5,
                'validation_size': 0.2,
                'test_size': 5,
                'hidden_layers': [
                    {
                        'size': 10,
                        'activation': 'relu'
                    },
                    {
                        'size': 8,
                        'activation': 'relu'
                    }
                ],
                'optimizer': 'Adam',
                'learning_rate': 0.01,
                'batch_size': 32,
                'max_epochs': 150
            }
            recurrent_neural_network(input_dataset_path='/path/to/myDataset.csv',
                                     output_model_path='/path/to/newModel.h5',
                                     output_test_table_path='/path/to/newTable.csv',
                                     output_plot_path='/path/to/newPlot.png',
                                     properties=prop)

    Info:
        * wrapped_software:
            * name: TensorFlow Keras LSTM
            * version: >2.1.0
            * license: MIT
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl

    """

    def __init__(self, input_dataset_path, output_model_path,
                 output_test_table_path=None, output_plot_path=None, properties=None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
        super().__init__(properties)
        self.locals_var_dict = locals().copy()

        # Input/Output files
        self.io_dict = {
            "in": {"input_dataset_path": input_dataset_path},
            "out": {"output_model_path": output_model_path, "output_test_table_path": output_test_table_path, "output_plot_path": output_plot_path}
        }

        # Properties specific for BB
        self.target = properties.get('target', {})
        self.validation_size = properties.get('validation_size', 0.2)
        self.window_size = properties.get('window_size', 5)
        self.test_size = properties.get('test_size', 5)
        self.hidden_layers = properties.get('hidden_layers', [])
        self.optimizer = properties.get('optimizer', 'Adam')
        self.learning_rate = properties.get('learning_rate', 0.02)
        self.batch_size = properties.get('batch_size', 100)
        self.max_epochs = properties.get('max_epochs', 100)
        self.normalize_cm = properties.get('normalize_cm', False)
        self.properties = properties

        # Check the properties
        self.check_properties(properties)
        self.check_arguments()

    def check_data_params(self, out_log, err_log):
        """ Checks all the input/output paths and parameters """
        self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", False, out_log, self.__class__.__name__)
        self.io_dict["out"]["output_model_path"] = check_output_path(self.io_dict["out"]["output_model_path"], "output_model_path", False, out_log, self.__class__.__name__)
        self.io_dict["out"]["output_test_table_path"] = check_output_path(self.io_dict["out"]["output_test_table_path"], "output_test_table_path", True, out_log, self.__class__.__name__)
        self.io_dict["out"]["output_plot_path"] = check_output_path(self.io_dict["out"]["output_plot_path"], "output_plot_path", True, out_log, self.__class__.__name__)

    def build_model(self, input_shape):
        """ Builds the neural network according to the hidden_layers property """

        # create model
        model = Sequential()

        # if no hidden_layers provided, create manually a hidden layer with default values
        if not self.hidden_layers:
            self.hidden_layers = [{'size': 50, 'activation': 'relu'}]

        # generate hidden_layers
        for i, layer in enumerate(self.hidden_layers):
            if i == 0:
                model.add(LSTM(layer['size'], activation=layer['activation'], kernel_initializer='he_normal', input_shape=input_shape))  # 1st hidden layer
            else:
                model.add(Dense(layer['size'], activation=layer['activation'], kernel_initializer='he_normal'))

        model.add(Dense(1))  # output layer

        return model
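
    # For example, hidden_layers=[{'size': 10, 'activation': 'relu'}, {'size': 8, 'activation': 'relu'}]
    # with input_shape=(5, 1) produces the stack: LSTM(10) -> Dense(8) -> Dense(1)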

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`RecurrentNeuralNetwork <neural_networks.recurrent_neural_network.RecurrentNeuralNetwork>` object."""

        # check input/output paths and parameters
        self.check_data_params(self.out_log, self.err_log)

        # Setup Biobb
        if self.check_restart():
            return 0
        self.stage_files()

        # load dataset
        fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
        if 'column' in self.target:
            labels = getHeader(self.io_dict["in"]["input_dataset_path"])
            skiprows = 1
        else:
            labels = None
            skiprows = None
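        # the regex separator below lets read_csv parse whitespace-, semicolon-, colon-, comma- or tab-delimited files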
        data = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header=None, sep="\\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)

        # get target column
        target = data[getTargetValue(self.target)].to_numpy()
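        # e.g. with window_size=3, a series [10, 20, 30, 40, 50] yields windows X=[[10, 20, 30], [20, 30, 40]]
        # and next values y=[40, 50] (assuming the usual sliding-window semantics of split_sequence)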
        # split into samples
        X, y = split_sequence(target, self.window_size)
        # reshape into [samples, timesteps, features]
        X = X.reshape((X.shape[0], X.shape[1], 1))
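        # Keras LSTM layers expect 3D input (batch, timesteps, features); each timestep here carries a single feature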

        # train / test split
        fu.log('Creating train and test sets', self.out_log, self.global_log)
        X_train, X_test, y_train, y_test = X[:-self.test_size], X[-self.test_size:], y[:-self.test_size], y[-self.test_size:]
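        # the last test_size windows are held out for testing, preserving the temporal order of the series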

        # build model
        fu.log('Building model', self.out_log, self.global_log)
        model = self.build_model((X_train.shape[1], 1))

        # model summary
        stringlist = []
        model.summary(print_fn=lambda x: stringlist.append(x))
        model_summary = "\n".join(stringlist)
        fu.log('Model summary:\n\n%s\n' % model_summary, self.out_log, self.global_log)

        # get optimizer
        mod = __import__('tensorflow.keras.optimizers', fromlist=[self.optimizer])
        opt_class = getattr(mod, self.optimizer)
        opt = opt_class(learning_rate=self.learning_rate)
        # compile model
        model.compile(optimizer=opt, loss='mse', metrics=['mse', 'mae'])
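        # e.g. optimizer='Adam' resolves to tensorflow.keras.optimizers.Adam through the dynamic import above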

        # fitting
        fu.log('Training model', self.out_log, self.global_log)
        # set an early stopping mechanism
        # set patience=2, to be a bit tolerant against random validation loss increases
        early_stopping = EarlyStopping(patience=2)
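        # EarlyStopping monitors val_loss by default, so training stops after 2 consecutive epochs without improvement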
        # fit the model
        mf = model.fit(X_train,
                       y_train,
                       batch_size=self.batch_size,
                       epochs=self.max_epochs,
                       callbacks=[early_stopping],
                       validation_split=self.validation_size,
                       verbose=1)

        train_metrics = pd.DataFrame()
        train_metrics['metric'] = ['Train loss', 'Train MAE', 'Train MSE', 'Validation loss', 'Validation MAE', 'Validation MSE']
        train_metrics['coefficient'] = [mf.history['loss'][-1], mf.history['mae'][-1], mf.history['mse'][-1], mf.history['val_loss'][-1], mf.history['val_mae'][-1], mf.history['val_mse'][-1]]

        fu.log('Training metrics\n\nTRAINING METRICS TABLE\n\n%s\n' % train_metrics, self.out_log, self.global_log)

        # testing
        fu.log('Testing model', self.out_log, self.global_log)
        test_loss, test_mse, test_mae = model.evaluate(X_test, y_test)

        # predict data from X_test
        test_predictions = model.predict(X_test)
        test_predictions = np.around(test_predictions, decimals=2)
        tpr = np.squeeze(np.asarray(test_predictions))

        test_metrics = pd.DataFrame()
        test_metrics['metric'] = ['Test loss', 'Test MAE', 'Test MSE']
        test_metrics['coefficient'] = [test_loss, test_mae, test_mse]

        fu.log('Testing metrics\n\nTESTING METRICS TABLE\n\n%s\n' % test_metrics, self.out_log, self.global_log)

        test_table = pd.DataFrame()
        test_table['prediction'] = tpr
        test_table['target'] = y_test
        test_table['residual'] = test_table['target'] - test_table['prediction']
        test_table['difference %'] = np.absolute(test_table['residual'] / test_table['target'] * 100)
        pd.set_option('display.float_format', lambda x: '%.2f' % x)
        # sort by difference in %
        test_table = test_table.sort_values(by=['difference %'])
        test_table = test_table.reset_index(drop=True)
        fu.log('TEST DATA\n\n%s\n' % test_table, self.out_log, self.global_log)

        # save test data
        if self.io_dict["out"]["output_test_table_path"]:
            fu.log('Saving testing data to %s' % self.io_dict["out"]["output_test_table_path"], self.out_log, self.global_log)
            test_table.to_csv(self.io_dict["out"]["output_test_table_path"], index=False, header=True)

        # create test plot
        if self.io_dict["out"]["output_plot_path"]:
            fu.log('Saving plot to %s' % self.io_dict["out"]["output_plot_path"], self.out_log, self.global_log)
            test_predictions = test_predictions.flatten()
            train_predictions = model.predict(X_train).flatten()
            plot = plotResultsReg(mf.history, y_test, test_predictions, y_train, train_predictions)
            plot.savefig(self.io_dict["out"]["output_plot_path"], dpi=150)

        # save model and parameters
        vars_obj = {
            'target': self.target,
            'window_size': self.window_size,
            'type': 'recurrent'
        }
        variables = json.dumps(vars_obj)
        fu.log('Saving model to %s' % self.io_dict["out"]["output_model_path"], self.out_log, self.global_log)
        with h5py.File(self.io_dict["out"]["output_model_path"], mode='w') as f:
            hdf5_format.save_model_to_hdf5(model, f)
            f.attrs['variables'] = variables
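        # the JSON-encoded variables are stored as an HDF5 attribute alongside the model, so they can be recovered when the model is reloaded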

        # Copy files to host
        self.copy_to_host()

        self.tmp_files.extend([
            self.stage_io_dict.get("unique_dir")
        ])
        self.remove_tmp_files()

        self.check_arguments(output_files_created=True, raise_exception=False)

        return 0


def recurrent_neural_network(input_dataset_path: str, output_model_path: str, output_test_table_path: str = None, output_plot_path: str = None, properties: dict = None, **kwargs) -> int:
    """Execute the :class:`RecurrentNeuralNetwork <neural_networks.recurrent_neural_network.RecurrentNeuralNetwork>` class and
    execute the :meth:`launch() <neural_networks.recurrent_neural_network.RecurrentNeuralNetwork.launch>` method."""

    return RecurrentNeuralNetwork(input_dataset_path=input_dataset_path,
                                  output_model_path=output_model_path,
                                  output_test_table_path=output_test_table_path,
                                  output_plot_path=output_plot_path,
                                  properties=properties, **kwargs).launch()


def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(description="Wrapper of the TensorFlow Keras LSTM method.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('--config', required=False, help='Configuration file')

    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')
    required_args.add_argument('--output_model_path', required=True, help='Path to the output model file. Accepted formats: h5.')
    parser.add_argument('--output_test_table_path', required=False, help='Path to the test table file. Accepted formats: csv.')
    parser.add_argument('--output_plot_path', required=False, help='Loss, accuracy and MSE plots. Accepted formats: png.')
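
    # example invocation (hypothetical paths):
    #   recurrent_neural_network.py --config config.yml --input_dataset_path dataset.csv --output_model_path model.h5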

    args = parser.parse_args()
    args.config = args.config or "{}"
    properties = settings.ConfReader(config=args.config).get_prop_dic()

    # Specific call of each building block
    recurrent_neural_network(input_dataset_path=args.input_dataset_path,
                             output_model_path=args.output_model_path,
                             output_test_table_path=args.output_test_table_path,
                             output_plot_path=args.output_plot_path,
                             properties=properties)


if __name__ == '__main__':
    main()