Coverage for biobb_ml/classification/random_forest_classifier.py: 83%

152 statements  

coverage.py v7.5.1, created at 2024-05-07 09:39 +0000

#!/usr/bin/env python3

"""Module containing the RandomForestClassifier class and the command line interface."""
import argparse
import joblib
import pandas as pd
import numpy as np
from biobb_common.generic.biobb_object import BiobbObject
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report, log_loss
from sklearn import ensemble
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.classification.common import check_input_path, check_output_path, getHeader, getIndependentVars, getIndependentVarsList, getTarget, getTargetValue, getWeight, plotMultipleCM, plotBinaryClassifier

class RandomForestClassifier(BiobbObject):
    """
    | biobb_ml RandomForestClassifier
    | Wrapper of the scikit-learn RandomForestClassifier method.
    | Trains and tests a given dataset and saves the model and scaler. Visit the `RandomForestClassifier documentation page <https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html>`_ on the official scikit-learn website for further information.

    Args:
        input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/classification/dataset_random_forest_classifier.csv>`_. Accepted formats: csv (edam:format_3752).
        output_model_path (str): Path to the output model file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_model_random_forest_classifier.pkl>`_. Accepted formats: pkl (edam:format_3653).
        output_test_table_path (str) (Optional): Path to the test table file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_test_random_forest_classifier.csv>`_. Accepted formats: csv (edam:format_3752).
        output_plot_path (str) (Optional): Path to the statistics plot. If the target is binary it shows the confusion matrix, the distributions of the predicted probabilities of both classes and the ROC curve. If the target is non-binary it shows the confusion matrix. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_plot_random_forest_classifier.png>`_. Accepted formats: png (edam:format_3603).
        properties (dict - Python dictionary object containing the tool parameters, not input/output files):
            * **independent_vars** (*dict*) - ({}) Independent variables you want to train from your dataset. You can specify either a list of column names from your input dataset, a list of column indexes or a range of column indexes. Formats: { "columns": ["column1", "column2"] } or { "indexes": [0, 2, 3, 10, 11, 17] } or { "range": [[0, 20], [50, 102]] }. In case of multiple formats, the first one will be picked.
            * **target** (*dict*) - ({}) Dependent variable you want to predict from your dataset. You can specify either a column name or a column index. Formats: { "column": "column3" } or { "index": 21 }. In case of multiple formats, the first one will be picked.
            * **weight** (*dict*) - ({}) Weight variable from your dataset. You can specify either a column name or a column index. Formats: { "column": "column3" } or { "index": 21 }. In case of multiple formats, the first one will be picked.
            * **n_estimators** (*int*) - (100) The number of trees in the forest.
            * **bootstrap** (*bool*) - (True) Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree.
            * **normalize_cm** (*bool*) - (False) Whether or not to normalize the confusion matrix.
            * **random_state_method** (*int*) - (5) [1~1000|1] Controls the randomness of the estimator.
            * **random_state_train_test** (*int*) - (5) [1~1000|1] Controls the shuffling applied to the data before applying the split.
            * **test_size** (*float*) - (0.2) [0~1|0.05] Represents the proportion of the dataset to include in the test split. It should be between 0.0 and 1.0.
            * **scale** (*bool*) - (False) Whether or not to scale the input dataset.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporary files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.

    Examples:
        This is an example of how to use the building block from Python::

            from biobb_ml.classification.random_forest_classifier import random_forest_classifier
            prop = {
                'independent_vars': {
                    'columns': ['column1', 'column2', 'column3']
                },
                'target': {
                    'column': 'target'
                },
                'n_estimators': 100,
                'test_size': 0.2
            }
            random_forest_classifier(input_dataset_path='/path/to/myDataset.csv',
                                     output_model_path='/path/to/newModel.pkl',
                                     output_test_table_path='/path/to/newTable.csv',
                                     output_plot_path='/path/to/newPlot.png',
                                     properties=prop)

    Info:
        * wrapped_software:
            * name: scikit-learn RandomForestClassifier
            * version: >=0.24.2
            * license: BSD 3-Clause
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl

    """

    def __init__(self, input_dataset_path, output_model_path,
                 output_test_table_path=None, output_plot_path=None, properties=None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
        super().__init__(properties)
        self.locals_var_dict = locals().copy()

        # Input/Output files
        self.io_dict = {
            "in": {"input_dataset_path": input_dataset_path},
            "out": {"output_model_path": output_model_path, "output_test_table_path": output_test_table_path, "output_plot_path": output_plot_path}
        }

        # Properties specific for BB
        self.independent_vars = properties.get('independent_vars', {})
        self.target = properties.get('target', {})
        self.weight = properties.get('weight', {})
        self.n_estimators = properties.get('n_estimators', 100)
        self.bootstrap = properties.get('bootstrap', True)
        self.normalize_cm = properties.get('normalize_cm', False)
        self.random_state_method = properties.get('random_state_method', 5)
        self.random_state_train_test = properties.get('random_state_train_test', 5)
        self.test_size = properties.get('test_size', 0.2)
        self.scale = properties.get('scale', False)
        self.properties = properties

        # Check the properties
        self.check_properties(properties)
        self.check_arguments()

    def check_data_params(self, out_log, err_log):
        """ Checks all the input/output paths and parameters """
        self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", out_log, self.__class__.__name__)
        self.io_dict["out"]["output_model_path"] = check_output_path(self.io_dict["out"]["output_model_path"], "output_model_path", False, out_log, self.__class__.__name__)
        if self.io_dict["out"]["output_test_table_path"]:
            self.io_dict["out"]["output_test_table_path"] = check_output_path(self.io_dict["out"]["output_test_table_path"], "output_test_table_path", True, out_log, self.__class__.__name__)
        if self.io_dict["out"]["output_plot_path"]:
            self.io_dict["out"]["output_plot_path"] = check_output_path(self.io_dict["out"]["output_plot_path"], "output_plot_path", True, out_log, self.__class__.__name__)

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`RandomForestClassifier <classification.random_forest_classifier.RandomForestClassifier>` classification.random_forest_classifier.RandomForestClassifier object."""

        # check input/output paths and parameters
        self.check_data_params(self.out_log, self.err_log)

        # Setup Biobb
        if self.check_restart():
            return 0
        self.stage_files()

        # load dataset
        fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
        if 'columns' in self.independent_vars:
            labels = getHeader(self.io_dict["in"]["input_dataset_path"])
            skiprows = 1
        else:
            labels = None
            skiprows = None
        data = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header=None, sep="\\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)
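        # Note: the separator regex above lets pandas parse whitespace-, semicolon-, colon-,
        # comma- or tab-delimited files; when column names are given in independent_vars the
        # header row is read separately via getHeader() and skipped here (skiprows=1).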

        # declare inputs, targets and weights
        # the inputs are all the independent variables
        X = getIndependentVars(self.independent_vars, data, self.out_log, self.__class__.__name__)
        fu.log('Independent variables: [%s]' % (getIndependentVarsList(self.independent_vars)), self.out_log, self.global_log)
        # target
        y = getTarget(self.target, data, self.out_log, self.__class__.__name__)
        fu.log('Target: %s' % (getTargetValue(self.target)), self.out_log, self.global_log)
        # weights
        if self.weight:
            w = getWeight(self.weight, data, self.out_log, self.__class__.__name__)
            fu.log('Weight column provided', self.out_log, self.global_log)

        # train / test split
        fu.log('Creating train and test sets', self.out_log, self.global_log)
        arrays_sets = (X, y)
        # if the user provides weights
        if self.weight:
            arrays_sets = arrays_sets + (w,)
            X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(*arrays_sets, test_size=self.test_size, random_state=self.random_state_train_test)
        else:
            X_train, X_test, y_train, y_test = train_test_split(*arrays_sets, test_size=self.test_size, random_state=self.random_state_train_test)
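        # When a weight column is supplied, the per-sample weights are split together with X
        # and y so that w_train can be passed to fit() below; the split itself is not stratified.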

        # scale dataset
        if self.scale:
            fu.log('Scaling dataset', self.out_log, self.global_log)
            scaler = StandardScaler()
            X_train = scaler.fit_transform(X_train)
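        # The scaler is fitted on the training split only; the same fitted instance is reused
        # below to transform X_test before prediction and is serialized alongside the model.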

        # classification
        fu.log('Training dataset applying random forest classification', self.out_log, self.global_log)
        model = ensemble.RandomForestClassifier(n_estimators=self.n_estimators, bootstrap=self.bootstrap, random_state=self.random_state_method)
        arrays_fit = (X_train, y_train)
        # if the user provides weights
        if self.weight:
            arrays_fit = arrays_fit + (w_train,)

        model.fit(*arrays_fit)
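        # When present, w_train is forwarded as the third positional argument of fit(),
        # i.e. scikit-learn's sample_weight.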

        y_hat_train = model.predict(X_train)
        # classification report
        cr_train = classification_report(y_train, y_hat_train)
        # log loss
        yhat_prob_train = model.predict_proba(X_train)
        l_loss_train = log_loss(y_train, yhat_prob_train)
        fu.log('Calculating scores and report for training dataset\n\nCLASSIFICATION REPORT\n\n%s\nLog loss: %.3f\n' % (cr_train, l_loss_train), self.out_log, self.global_log)

        # compute confusion matrix
        cnf_matrix_train = confusion_matrix(y_train, y_hat_train)
        np.set_printoptions(precision=2)
        if self.normalize_cm:
            cnf_matrix_train = cnf_matrix_train.astype('float') / cnf_matrix_train.sum(axis=1)[:, np.newaxis]
            cm_type = 'NORMALIZED CONFUSION MATRIX'
        else:
            cm_type = 'CONFUSION MATRIX, WITHOUT NORMALIZATION'

        fu.log('Calculating confusion matrix for training dataset\n\n%s\n\n%s\n' % (cm_type, cnf_matrix_train), self.out_log, self.global_log)
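        # Normalization divides each row of the confusion matrix by its sum, so the values
        # become per-true-class proportions rather than raw counts.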

        # testing
        if self.scale:
            X_test = scaler.transform(X_test)
        y_hat_test = model.predict(X_test)
        test_table = pd.DataFrame()
        y_hat_prob = model.predict_proba(X_test)
        y_hat_prob = np.around(y_hat_prob, decimals=2)
        y_hat_prob = tuple(map(tuple, y_hat_prob))
        test_table['P' + np.array2string(np.unique(y_test))] = y_hat_prob
        y_test = y_test.reset_index(drop=True)
        test_table['target'] = y_test
        fu.log('Testing\n\nTEST DATA\n\n%s\n' % test_table, self.out_log, self.global_log)
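        # Each row of test_table pairs the predicted per-class probabilities (rounded to two
        # decimals, stored as a tuple in a single column) with the true target value; the
        # column header lists the class labels in sorted order.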

        # classification report
        cr = classification_report(y_test, y_hat_test)
        # log loss
        yhat_prob = model.predict_proba(X_test)
        l_loss = log_loss(y_test, yhat_prob)
        fu.log('Calculating scores and report for testing dataset\n\nCLASSIFICATION REPORT\n\n%s\nLog loss: %.3f\n' % (cr, l_loss), self.out_log, self.global_log)

        # compute confusion matrix
        cnf_matrix = confusion_matrix(y_test, y_hat_test)
        np.set_printoptions(precision=2)
        if self.normalize_cm:
            cnf_matrix = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
            cm_type = 'NORMALIZED CONFUSION MATRIX'
        else:
            cm_type = 'CONFUSION MATRIX, WITHOUT NORMALIZATION'

        fu.log('Calculating confusion matrix for testing dataset\n\n%s\n\n%s\n' % (cm_type, cnf_matrix), self.out_log, self.global_log)

        if self.io_dict["out"]["output_test_table_path"]:
            fu.log('Saving testing data to %s' % self.io_dict["out"]["output_test_table_path"], self.out_log, self.global_log)
            test_table.to_csv(self.io_dict["out"]["output_test_table_path"], index=False, header=True)

        # plot
        if self.io_dict["out"]["output_plot_path"]:
            vs = y.unique().tolist()
            vs.sort()
            if len(vs) > 2:
                plot = plotMultipleCM(cnf_matrix_train, cnf_matrix, self.normalize_cm, vs)
                fu.log('Saving confusion matrix plot to %s' % self.io_dict["out"]["output_plot_path"], self.out_log, self.global_log)
            else:
                plot = plotBinaryClassifier(model, yhat_prob_train, yhat_prob, cnf_matrix_train, cnf_matrix, y_train, y_test, normalize=self.normalize_cm)
                fu.log('Saving binary classifier evaluator plot to %s' % self.io_dict["out"]["output_plot_path"], self.out_log, self.global_log)
            plot.savefig(self.io_dict["out"]["output_plot_path"], dpi=150)
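        # Binary targets get the full evaluator panel (confusion matrices, predicted-probability
        # distributions and ROC curve) via plotBinaryClassifier; targets with more than two
        # classes only get the train/test confusion matrices via plotMultipleCM.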

        # save model, scaler and parameters
        tv = y.unique().tolist()
        tv.sort()
        variables = {
            'target': self.target,
            'independent_vars': self.independent_vars,
            'scale': self.scale,
            'target_values': tv
        }
        fu.log('Saving model to %s' % self.io_dict["out"]["output_model_path"], self.out_log, self.global_log)
        with open(self.io_dict["out"]["output_model_path"], "wb") as f:
            joblib.dump(model, f)
            if self.scale:
                joblib.dump(scaler, f)
            joblib.dump(variables, f)
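        # The objects above are dumped back to back into a single file, so they must be read
        # back with sequential joblib.load() calls on the same file handle, in the same order
        # (model, optional scaler, variables). A minimal loading sketch, shown here only as a
        # comment and not part of this module (it collects every pickled object until EOF):
        #
        #     objects = []
        #     with open('/path/to/newModel.pkl', 'rb') as f:
        #         while True:
        #             try:
        #                 objects.append(joblib.load(f))
        #             except EOFError:
        #                 break
        #     # objects is [model, variables] or [model, scaler, variables], depending on 'scale'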

        # Copy files to host
        self.copy_to_host()

        self.tmp_files.extend([
            self.stage_io_dict.get("unique_dir")
        ])
        self.remove_tmp_files()

        self.check_arguments(output_files_created=True, raise_exception=False)

        return 0

def random_forest_classifier(input_dataset_path: str, output_model_path: str, output_test_table_path: str = None, output_plot_path: str = None, properties: dict = None, **kwargs) -> int:
    """Execute the :class:`RandomForestClassifier <classification.random_forest_classifier.RandomForestClassifier>` class and
    execute the :meth:`launch() <classification.random_forest_classifier.RandomForestClassifier.launch>` method."""

    return RandomForestClassifier(input_dataset_path=input_dataset_path,
                                  output_model_path=output_model_path,
                                  output_test_table_path=output_test_table_path,
                                  output_plot_path=output_plot_path,
                                  properties=properties, **kwargs).launch()

def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(description="Wrapper of the scikit-learn RandomForestClassifier method.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('--config', required=False, help='Configuration file')

    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')
    required_args.add_argument('--output_model_path', required=True, help='Path to the output model file. Accepted formats: pkl.')
    parser.add_argument('--output_test_table_path', required=False, help='Path to the test table file. Accepted formats: csv.')
    parser.add_argument('--output_plot_path', required=False, help='Path to the statistics plot. If target is binary it shows confusion matrix, distributions of the predicted probabilities of both classes and ROC curve. If target is non-binary it shows confusion matrix. Accepted formats: png.')

    args = parser.parse_args()
    args.config = args.config or "{}"
    properties = settings.ConfReader(config=args.config).get_prop_dic()

    # Specific call of each building block
    random_forest_classifier(input_dataset_path=args.input_dataset_path,
                             output_model_path=args.output_model_path,
                             output_test_table_path=args.output_test_table_path,
                             output_plot_path=args.output_plot_path,
                             properties=properties)

if __name__ == '__main__':
    main()
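# Example command-line invocation (illustrative placeholder paths; --config optionally points
# to a configuration file read by biobb_common's settings.ConfReader to supply the properties
# described in the class docstring):
#
#     python random_forest_classifier.py \
#         --input_dataset_path dataset_random_forest_classifier.csv \
#         --output_model_path output_model.pkl \
#         --output_test_table_path output_test.csv \
#         --output_plot_path output_plot.png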