Coverage for biobb_ml/classification/k_neighbors_coefficient.py: 85%

125 statements  


#!/usr/bin/env python3

"""Module containing the KNeighborsCoefficient class and the command line interface."""
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from biobb_common.generic.biobb_object import BiobbObject
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, log_loss
from sklearn.neighbors import KNeighborsClassifier
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.classification.common import check_input_path, check_output_path, getHeader, getIndependentVars, getIndependentVarsList, getTarget, getTargetValue, getWeight


class KNeighborsCoefficient(BiobbObject):
    """
    | biobb_ml KNeighborsCoefficient
    | Wrapper of the scikit-learn KNeighborsClassifier method.
    | Trains and tests a given dataset and calculates the best K coefficient. Visit the `KNeighborsClassifier documentation page <https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html>`_ in the sklearn official website for further information.

    Args:
        input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/classification/dataset_k_neighbors_coefficient.csv>`_. Accepted formats: csv (edam:format_3752).
        output_results_path (str): Path to the accuracy values list. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_test_k_neighbors_coefficient.csv>`_. Accepted formats: csv (edam:format_3752).
        output_plot_path (str) (Optional): Path to the accuracy plot. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_plot_k_neighbors_coefficient.png>`_. Accepted formats: png (edam:format_3603).
        properties (dict - Python dictionary object containing the tool parameters, not input/output files):
            * **independent_vars** (*dict*) - ({}) Independent variables or columns from your dataset you want to train, e.g. { 'columns': ['column1', 'column2'] }.
            * **target** (*dict*) - ({}) Dependent variable or column from your dataset you want to predict, e.g. { 'column': 'target' }.
            * **weight** (*dict*) - ({}) Optional weight column from your dataset, used for the train/test split.
            * **metric** (*string*) - ("minkowski") The distance metric to use for the tree. Values: euclidean (Computes the Euclidean distance between two 1-D arrays), manhattan (Compute the Manhattan distance), chebyshev (Compute the Chebyshev distance), minkowski (Compute the Minkowski distance between two 1-D arrays), wminkowski (Compute the weighted Minkowski distance between two 1-D arrays), seuclidean (Return the standardized Euclidean distance between two 1-D arrays), mahalanobis (Compute the Mahalanobis distance between two 1-D arrays).
            * **max_neighbors** (*int*) - (6) [1~100|1] Maximum number of neighbors to use by default for kneighbors queries.
            * **random_state_train_test** (*int*) - (5) [1~1000|1] Controls the shuffling applied to the data before applying the split.
            * **test_size** (*float*) - (0.2) [0~1|0.05] Represents the proportion of the dataset to include in the test split. It should be between 0.0 and 1.0.
            * **scale** (*bool*) - (False) Whether or not to scale the input dataset.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.

    Examples:
        This is a use example of how to use the building block from Python::

            from biobb_ml.classification.k_neighbors_coefficient import k_neighbors_coefficient
            prop = {
                'independent_vars': {
                    'columns': [ 'column1', 'column2', 'column3' ]
                },
                'target': {
                    'column': 'target'
                },
                'max_neighbors': 6,
                'test_size': 0.2
            }
            k_neighbors_coefficient(input_dataset_path='/path/to/myDataset.csv',
                                    output_results_path='/path/to/newTable.csv',
                                    output_plot_path='/path/to/newPlot.png',
                                    properties=prop)

    Info:
        * wrapped_software:
            * name: scikit-learn KNeighborsClassifier
            * version: >=0.24.2
            * license: BSD 3-Clause
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl

    """

    def __init__(self, input_dataset_path, output_results_path,
                 output_plot_path=None, properties=None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
        super().__init__(properties)
        self.locals_var_dict = locals().copy()

        # Input/Output files
        self.io_dict = {
            "in": {"input_dataset_path": input_dataset_path},
            "out": {"output_results_path": output_results_path, "output_plot_path": output_plot_path}
        }

        # Properties specific for BB
        self.independent_vars = properties.get('independent_vars', {})
        self.target = properties.get('target', {})
        self.weight = properties.get('weight', {})
        self.metric = properties.get('metric', 'minkowski')
        self.max_neighbors = properties.get('max_neighbors', 6)
        self.random_state_train_test = properties.get('random_state_train_test', 5)
        self.test_size = properties.get('test_size', 0.2)
        self.scale = properties.get('scale', False)
        self.properties = properties

        # Check the properties
        self.check_properties(properties)
        self.check_arguments()

    def check_data_params(self, out_log, err_log):
        """ Checks all the input/output paths and parameters """
        self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", out_log, self.__class__.__name__)
        self.io_dict["out"]["output_results_path"] = check_output_path(self.io_dict["out"]["output_results_path"], "output_results_path", False, out_log, self.__class__.__name__)
        if self.io_dict["out"]["output_plot_path"]:
            self.io_dict["out"]["output_plot_path"] = check_output_path(self.io_dict["out"]["output_plot_path"], "output_plot_path", True, out_log, self.__class__.__name__)

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`KNeighborsCoefficient <classification.k_neighbors_coefficient.KNeighborsCoefficient>` object."""

        # check input/output paths and parameters
        self.check_data_params(self.out_log, self.err_log)

        # Setup Biobb
        if self.check_restart():
            return 0
        self.stage_files()

        # load dataset
        fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
        if 'columns' in self.independent_vars:
            labels = getHeader(self.io_dict["in"]["input_dataset_path"])
            skiprows = 1
        else:
            labels = None
            skiprows = None
        data = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header=None, sep="\\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)
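        # The separator regex passed to pd.read_csv above ("\s+|;|:|,|\t") lets the same
        # loader accept whitespace-, semicolon-, colon-, comma- or tab-delimited datasets.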

        # declare inputs, targets and weights
        # the inputs are all the independent variables
        X = getIndependentVars(self.independent_vars, data, self.out_log, self.__class__.__name__)
        fu.log('Independent variables: [%s]' % (getIndependentVarsList(self.independent_vars)), self.out_log, self.global_log)
        # target
        y = getTarget(self.target, data, self.out_log, self.__class__.__name__)
        fu.log('Target: %s' % (getTargetValue(self.target)), self.out_log, self.global_log)
        # weights
        if self.weight:
            w = getWeight(self.weight, data, self.out_log, self.__class__.__name__)
            fu.log('Weight column provided', self.out_log, self.global_log)

        # train / test split
        fu.log('Creating train and test sets', self.out_log, self.global_log)
        arrays_sets = (X, y)
        # if the user provides weights, split them together with X and y
        if self.weight:
            arrays_sets = arrays_sets + (w,)
            X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(*arrays_sets, test_size=self.test_size, random_state=self.random_state_train_test)
        else:
            X_train, X_test, y_train, y_test = train_test_split(*arrays_sets, test_size=self.test_size, random_state=self.random_state_train_test)

        # scale dataset
        if self.scale:
            fu.log('Scaling dataset', self.out_log, self.global_log)
            scaler = StandardScaler()
            X_train = scaler.fit_transform(X_train)
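            # The scaler statistics (mean and variance) are learned on the training split
            # only; the test split is transformed with the same statistics further below,
            # so no information from the test set leaks into training.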

        # training and getting accuracy for each K
        fu.log('Training dataset applying k neighbors classification from 1 to %d n_neighbors' % self.max_neighbors, self.out_log, self.global_log)
        neighbors = np.arange(1, self.max_neighbors + 1)
        train_accuracy = np.empty(len(neighbors))
        test_accuracy = np.empty(len(neighbors))
        std_acc = np.zeros(self.max_neighbors)

        # scale the test set with the scaler fitted on the training set
        if self.scale:
            X_test = scaler.transform(X_test)

        for i, k in enumerate(neighbors):
            # Setup a knn classifier with k neighbors and the configured distance metric
            model = KNeighborsClassifier(n_neighbors=k, metric=self.metric)
            # Fit the model
            # (KNeighborsClassifier.fit() does not accept sample weights, so the optional
            # weight column is only used for the train/test split above)
            model.fit(X_train, y_train)
            # Compute accuracy on the training set
            train_accuracy[i] = model.score(X_train, y_train)
            # Compute accuracy on the test set
            test_accuracy[i] = model.score(X_test, y_test)
            # deviation
            yhat_test = model.predict(X_test)
            std_acc[i] = np.std(yhat_test == y_test) / np.sqrt(yhat_test.shape[0])
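
        # std_acc holds, for each K, the standard error of the per-sample test accuracy;
        # it is used further below to draw the shaded band around the test accuracy curve.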

        # best K / best accuracy
        best_k = test_accuracy.argmax() + 1
        best_accuracy = test_accuracy.max()
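        # test_accuracy.argmax() returns a 0-based index into neighbors = [1..max_neighbors],
        # so adding 1 converts it back to the actual K value.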

        # accuracy table
        test_table_accuracy = pd.DataFrame(data={'K': np.arange(1, self.max_neighbors + 1), 'accuracy': test_accuracy})
        fu.log('Calculating accuracy for each K\n\nACCURACY\n\n%s\n' % test_table_accuracy.to_string(index=False), self.out_log, self.global_log)

        # classification report and log loss for the model refitted with the best K
        model = KNeighborsClassifier(n_neighbors=best_k, metric=self.metric)
        model.fit(X_train, y_train)
        cr_test = classification_report(y_test, model.predict(X_test))
        # log loss
        yhat_prob = model.predict_proba(X_test)
        l_loss = log_loss(y_test, yhat_prob)
        fu.log('Calculating report for testing dataset and best K = %d | accuracy = %.3f\n\nCLASSIFICATION REPORT\n\n%s\nLog loss: %.3f\n' % (best_k, best_accuracy, cr_test, l_loss), self.out_log, self.global_log)

        fu.log('Saving results to %s' % self.io_dict["out"]["output_results_path"], self.out_log, self.global_log)
        test_table_accuracy.to_csv(self.io_dict["out"]["output_results_path"], index=False, header=True, float_format='%.3f')

        # accuracy plot
        if self.io_dict["out"]["output_plot_path"]:
            fu.log('Saving accuracy plot to %s' % self.io_dict["out"]["output_plot_path"], self.out_log, self.global_log)
            plt.title('k-NN Varying number of neighbors')
            plt.fill_between(range(1, self.max_neighbors + 1), test_accuracy - std_acc, test_accuracy + std_acc, alpha=0.10, label='Testing accuracy +/- 1 std')
            plt.plot(neighbors, train_accuracy, label='Training Accuracy')
            plt.plot(neighbors, test_accuracy, label='Testing accuracy')
            plt.axvline(x=best_k, c='red', label='Best K')
            plt.legend()
            plt.xlabel('Number of neighbors')
            plt.ylabel('Accuracy')
            plt.tight_layout()
            plt.savefig(self.io_dict["out"]["output_plot_path"], dpi=150)

        # Copy files to host
        self.copy_to_host()

        self.tmp_files.extend([
            self.stage_io_dict.get("unique_dir")
        ])
        self.remove_tmp_files()

        self.check_arguments(output_files_created=True, raise_exception=False)

        return 0


def k_neighbors_coefficient(input_dataset_path: str, output_results_path: str, output_plot_path: str = None, properties: dict = None, **kwargs) -> int:
    """Execute the :class:`KNeighborsCoefficient <classification.k_neighbors_coefficient.KNeighborsCoefficient>` class and
    execute the :meth:`launch() <classification.k_neighbors_coefficient.KNeighborsCoefficient.launch>` method."""

    return KNeighborsCoefficient(input_dataset_path=input_dataset_path,
                                 output_results_path=output_results_path,
                                 output_plot_path=output_plot_path,
                                 properties=properties, **kwargs).launch()


def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(description="Wrapper of the scikit-learn KNeighborsClassifier method.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('--config', required=False, help='Configuration file')

    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')
    required_args.add_argument('--output_results_path', required=True, help='Path to the accuracy values list. Accepted formats: csv.')
    parser.add_argument('--output_plot_path', required=False, help='Path to the accuracy plot. Accepted formats: png.')
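
    # Illustrative invocation, assuming the module is run directly as a script
    # (file names are placeholders):
    #   python k_neighbors_coefficient.py --input_dataset_path dataset.csv \
    #       --output_results_path accuracies.csv --output_plot_path accuracies.png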

    args = parser.parse_args()
    args.config = args.config or "{}"
    properties = settings.ConfReader(config=args.config).get_prop_dic()

    # Specific call of each building block
    k_neighbors_coefficient(input_dataset_path=args.input_dataset_path,
                            output_results_path=args.output_results_path,
                            output_plot_path=args.output_plot_path,
                            properties=properties)


if __name__ == '__main__':
    main()