Coverage for biobb_ml/regression/regression_predict.py: 75%
96 statements
coverage.py v7.6.1, created at 2024-10-03 14:57 +0000
#!/usr/bin/env python3

"""Module containing the RegressionPredict class and the command line interface."""
import argparse
import pandas as pd
import joblib
from biobb_common.generic.biobb_object import BiobbObject
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn import linear_model
from sklearn import ensemble
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.regression.common import check_input_path, check_output_path, getHeader, get_list_of_predictors, get_keys_of_predictors


class RegressionPredict(BiobbObject):
    """
    | biobb_ml RegressionPredict
    | Makes predictions from an input dataset and a given regression model.
    | Makes predictions from an input dataset (provided either as a file or as a dictionary property) and a given regression model trained with the `LinearRegression <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html>`_ or `RandomForestRegressor <https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html>`_ methods.

    Args:
        input_model_path (str): Path to the input model. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/regression/model_regression_predict.pkl>`_. Accepted formats: pkl (edam:format_3653).
        input_dataset_path (str) (Optional): Path to the dataset to predict. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/regression/input_regression_predict.csv>`_. Accepted formats: csv (edam:format_3752).
        output_results_path (str): Path to the output results file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/regression/ref_output_regression_predict.csv>`_. Accepted formats: csv (edam:format_3752).
        properties (dic - Python dictionary object containing the tool parameters, not input/output files):
            * **predictions** (*list*) - (None) List of dictionaries with all the values you want to predict targets for. It is taken into account only if **input_dataset_path** is not provided. Format: [{ 'var1': 1.0, 'var2': 2.0 }, { 'var1': 4.0, 'var2': 2.7 }] for datasets with headers and [[ 1.0, 2.0 ], [ 4.0, 2.7 ]] for datasets without headers.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
            * **sandbox_path** (*str*) - ("./") [WF property] Parent path to the sandbox directory.

    Examples:
        This is a use example of how to use the building block from Python::

            from biobb_ml.regression.regression_predict import regression_predict
            prop = {
                'predictions': [
                    {
                        'var1': 1.0,
                        'var2': 2.0
                    },
                    {
                        'var1': 4.0,
                        'var2': 2.7
                    }
                ]
            }
            regression_predict(input_model_path='/path/to/myModel.pkl',
                               output_results_path='/path/to/newPredictedResults.csv',
                               input_dataset_path='/path/to/myDataset.csv',
                               properties=prop)

    Info:
        * wrapped_software:
            * name: scikit-learn
            * version: >=0.24.2
            * license: BSD 3-Clause
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl

    """

    def __init__(self, input_model_path, output_results_path,
                 input_dataset_path=None, properties=None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
        super().__init__(properties)
        self.locals_var_dict = locals().copy()

        # Input/Output files
        self.io_dict = {
            "in": {"input_model_path": input_model_path, "input_dataset_path": input_dataset_path},
            "out": {"output_results_path": output_results_path}
        }

        # Properties specific for BB
        self.predictions = properties.get('predictions', [])
        self.properties = properties

        # Check the properties
        self.check_properties(properties)
        self.check_arguments()

    def check_data_params(self, out_log, err_log):
        """ Checks all the input/output paths and parameters """
        self.io_dict["in"]["input_model_path"] = check_input_path(self.io_dict["in"]["input_model_path"], "input_model_path", out_log, self.__class__.__name__)
        self.io_dict["out"]["output_results_path"] = check_output_path(self.io_dict["out"]["output_results_path"], "output_results_path", False, out_log, self.__class__.__name__)
        if self.io_dict["in"]["input_dataset_path"]:
            self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", out_log, self.__class__.__name__)

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`RegressionPredict <regression.regression_predict.RegressionPredict>` regression.regression_predict.RegressionPredict object."""

        # check input/output paths and parameters
        self.check_data_params(self.out_log, self.err_log)

        # Setup Biobb
        if self.check_restart():
            return 0
        self.stage_files()

        fu.log('Getting model from %s' % self.io_dict["in"]["input_model_path"], self.out_log, self.global_log)
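
        # Descriptive note (inferred from how this block reads the file): the input pickle is
        # assumed to hold several objects serialized one after another, typically the fitted
        # regressor, optionally a StandardScaler and a PolynomialFeatures transformer, and a
        # dict with the training variables. They are read back sequentially until EOF.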
        with open(self.io_dict["in"]["input_model_path"], "rb") as f:
            while True:
                try:
                    m = joblib.load(f)
                    if (isinstance(m, linear_model.LinearRegression) or isinstance(m, ensemble.RandomForestRegressor)):
                        new_model = m
                    if isinstance(m, StandardScaler):
                        scaler = m
                    if isinstance(m, PolynomialFeatures):
                        poly_features = m
                    if isinstance(m, dict):
                        variables = m
                except EOFError:
                    break

        if self.io_dict["in"]["input_dataset_path"]:
            # load dataset from input_dataset_path file
            fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
            if 'columns' in variables['independent_vars']:
                labels = getHeader(self.io_dict["in"]["input_dataset_path"])
                skiprows = 1
            else:
                labels = None
                skiprows = None
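            # Descriptive note: the separator below is a regex accepting runs of whitespace,
            # ';', ':', ',' or tabs; header=None combined with skiprows/names keeps the column
            # handling consistent for datasets with and without a header row.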
            new_data_table = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header=None, sep="\\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)
        else:
            # load dataset from properties
            if 'columns' in variables['independent_vars']:
                # sorting self.predictions in the correct order given by variables['independent_vars']['columns']
                index_map = {v: i for i, v in enumerate(variables['independent_vars']['columns'])}
                predictions = []
                for i, pred in enumerate(self.predictions):
                    sorted_pred = sorted(pred.items(), key=lambda pair: index_map[pair[0]])
                    predictions.append(dict(sorted_pred))
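                # Descriptive note (inferred from usage): the helpers from biobb_ml.regression.common
                # extract the row values (get_list_of_predictors) and the column names
                # (get_keys_of_predictors) from the ordered prediction dictionaries.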
                new_data_table = pd.DataFrame(data=get_list_of_predictors(predictions), columns=get_keys_of_predictors(predictions))
            else:
                predictions = self.predictions
                new_data_table = pd.DataFrame(data=predictions)

        if variables['scale']:
            fu.log('Scaling dataset', self.out_log, self.global_log)
            new_data = scaler.transform(new_data_table)
        else:
            new_data = new_data_table
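
        # 'poly_features' is only bound when a PolynomialFeatures object was found in the model
        # file, so checking locals() avoids a NameError for models trained without it.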
        if 'poly_features' in locals():
            new_data = poly_features.transform(new_data)
        p = new_model.predict(new_data)
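
        # Attach the predicted values: when the dataset has named columns, the target column name
        # stored in the model's 'variables' dict is used; otherwise the predictions are appended
        # as a new positional column.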
        if self.io_dict["in"]["input_dataset_path"] or 'columns' in variables['independent_vars']:
            new_data_table[variables['target']['column']] = p
        else:
            new_data_table[len(new_data_table.columns)] = p

        fu.log('Predicting results\n\nPREDICTION RESULTS\n\n%s\n' % new_data_table, self.out_log, self.global_log)
        fu.log('Saving results to %s' % self.io_dict["out"]["output_results_path"], self.out_log, self.global_log)
        new_data_table.to_csv(self.io_dict["out"]["output_results_path"], index=False, header=True, float_format='%.3f')

        # Copy files to host
        self.copy_to_host()

        self.tmp_files.extend([
            self.stage_io_dict.get("unique_dir")
        ])
        self.remove_tmp_files()

        self.check_arguments(output_files_created=True, raise_exception=False)

        return 0


def regression_predict(input_model_path: str, output_results_path: str, input_dataset_path: str = None, properties: dict = None, **kwargs) -> int:
    """Execute the :class:`RegressionPredict <regression.regression_predict.RegressionPredict>` class and
    execute the :meth:`launch() <regression.regression_predict.RegressionPredict.launch>` method."""

    return RegressionPredict(input_model_path=input_model_path,
                             output_results_path=output_results_path,
                             input_dataset_path=input_dataset_path,
                             properties=properties, **kwargs).launch()


def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(description="Makes predictions from an input dataset and a given regression model.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('--config', required=False, help='Configuration file')

    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('--input_model_path', required=True, help='Path to the input model. Accepted formats: pkl.')
    required_args.add_argument('--output_results_path', required=True, help='Path to the output results file. Accepted formats: csv.')
    parser.add_argument('--input_dataset_path', required=False, help='Path to the dataset to predict. Accepted formats: csv.')

    args = parser.parse_args()
    args.config = args.config or "{}"
    properties = settings.ConfReader(config=args.config).get_prop_dic()

    # Specific call of each building block
    regression_predict(input_model_path=args.input_model_path,
                       output_results_path=args.output_results_path,
                       input_dataset_path=args.input_dataset_path,
                       properties=properties)


if __name__ == '__main__':
    main()