Coverage for biobb_ml/classification/k_neighbors_coefficient.py: 85% (124 statements). Generated by coverage.py v7.6.1 at 2024-10-03 14:57 +0000.
#!/usr/bin/env python3

"""Module containing the KNeighborsCoefficient class and the command line interface."""
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from biobb_common.generic.biobb_object import BiobbObject
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, log_loss
from sklearn.neighbors import KNeighborsClassifier
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.classification.common import check_input_path, check_output_path, getHeader, getIndependentVars, getIndependentVarsList, getTarget, getTargetValue, getWeight


class KNeighborsCoefficient(BiobbObject):
    """
    | biobb_ml KNeighborsCoefficient
    | Wrapper of the scikit-learn KNeighborsClassifier method.
    | Trains and tests a given dataset and calculates the best K coefficient. Visit the `KNeighborsClassifier documentation page <https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html>`_ in the sklearn official website for further information.

    Args:
        input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/classification/dataset_k_neighbors_coefficient.csv>`_. Accepted formats: csv (edam:format_3752).
        output_results_path (str): Path to the accuracy values list. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_test_k_neighbors_coefficient.csv>`_. Accepted formats: csv (edam:format_3752).
        output_plot_path (str) (Optional): Path to the accuracy plot. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/classification/ref_output_plot_k_neighbors_coefficient.png>`_. Accepted formats: png (edam:format_3603).
        properties (dic - Python dictionary object containing the tool parameters, not input/output files):
            * **independent_vars** (*dict*) - ({}) Independent variables or columns from your dataset you want to train, e.g. { 'columns': [ 'column1', 'column2' ] }.
            * **target** (*dict*) - ({}) Dependent variable or column from your dataset you want to predict, e.g. { 'column': 'target' }.
            * **metric** (*string*) - ("minkowski") The distance metric to use for the tree. Values: euclidean (Computes the Euclidean distance between two 1-D arrays), manhattan (Compute the Manhattan distance), chebyshev (Compute the Chebyshev distance), minkowski (Compute the Minkowski distance between two 1-D arrays), wminkowski (Compute the weighted Minkowski distance between two 1-D arrays), seuclidean (Return the standardized Euclidean distance between two 1-D arrays), mahalanobis (Compute the Mahalanobis distance between two 1-D arrays).
            * **max_neighbors** (*int*) - (6) [1~100|1] Maximum number of neighbors to test; accuracies are computed for K from 1 up to this value.
            * **random_state_train_test** (*int*) - (5) [1~1000|1] Controls the shuffling applied to the data before applying the split.
            * **test_size** (*float*) - (0.2) [0~1|0.05] Represents the proportion of the dataset to include in the test split. It should be between 0.0 and 1.0.
            * **scale** (*bool*) - (False) Whether or not to scale the input dataset.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
            * **sandbox_path** (*str*) - ("./") [WF property] Parent path to the sandbox directory.

    Examples:
        This is a use example of how to use the building block from Python::

            from biobb_ml.classification.k_neighbors_coefficient import k_neighbors_coefficient
            prop = {
                'independent_vars': {
                    'columns': [ 'column1', 'column2', 'column3' ]
                },
                'target': {
                    'column': 'target'
                },
                'max_neighbors': 6,
                'test_size': 0.2
            }
            k_neighbors_coefficient(input_dataset_path='/path/to/myDataset.csv',
                                    output_results_path='/path/to/newTable.csv',
                                    output_plot_path='/path/to/newPlot.png',
                                    properties=prop)

    Info:
        * wrapped_software:
            * name: scikit-learn KNeighborsClassifier
            * version: >=0.24.2
            * license: BSD 3-Clause
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl

    """

    def __init__(self, input_dataset_path, output_results_path,
                 output_plot_path=None, properties=None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
        super().__init__(properties)
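        # Keep a snapshot of the constructor arguments; used later when the input/output arguments are checked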
        self.locals_var_dict = locals().copy()

        # Input/Output files
        self.io_dict = {
            "in": {"input_dataset_path": input_dataset_path},
            "out": {"output_results_path": output_results_path, "output_plot_path": output_plot_path}
        }

        # Properties specific for BB
        self.independent_vars = properties.get('independent_vars', {})
        self.target = properties.get('target', {})
        self.weight = properties.get('weight', {})
        self.metric = properties.get('metric', 'minkowski')
        self.max_neighbors = properties.get('max_neighbors', 6)
        self.random_state_train_test = properties.get('random_state_train_test', 5)
        self.test_size = properties.get('test_size', 0.2)
        self.scale = properties.get('scale', False)
        self.properties = properties

        # Check the properties
        self.check_properties(properties)
        self.check_arguments()

    def check_data_params(self, out_log, err_log):
        """ Checks all the input/output paths and parameters """
        self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", out_log, self.__class__.__name__)
        self.io_dict["out"]["output_results_path"] = check_output_path(self.io_dict["out"]["output_results_path"], "output_results_path", False, out_log, self.__class__.__name__)
        if self.io_dict["out"]["output_plot_path"]:
            self.io_dict["out"]["output_plot_path"] = check_output_path(self.io_dict["out"]["output_plot_path"], "output_plot_path", True, out_log, self.__class__.__name__)

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`KNeighborsCoefficient <classification.k_neighbors_coefficient.KNeighborsCoefficient>` object."""

        # check input/output paths and parameters
        self.check_data_params(self.out_log, self.err_log)

        # Setup Biobb
        if self.check_restart():
            return 0
        self.stage_files()

        # load dataset
        fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
        if 'columns' in self.independent_vars:
            labels = getHeader(self.io_dict["in"]["input_dataset_path"])
            skiprows = 1
        else:
            labels = None
            skiprows = None
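        # parse the CSV without assuming a header; whitespace, ';', ':', ',' and tab are all accepted as separators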
        data = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header=None, sep="\\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)

        # declare inputs, targets and weights
        # the inputs are all the independent variables
        X = getIndependentVars(self.independent_vars, data, self.out_log, self.__class__.__name__)
        fu.log('Independent variables: [%s]' % (getIndependentVarsList(self.independent_vars)), self.out_log, self.global_log)
        # target
        y = getTarget(self.target, data, self.out_log, self.__class__.__name__)
        fu.log('Target: %s' % (getTargetValue(self.target)), self.out_log, self.global_log)
        # weights
        if self.weight:
            w = getWeight(self.weight, data, self.out_log, self.__class__.__name__)
            fu.log('Weight column provided', self.out_log, self.global_log)

        # train / test split
        fu.log('Creating train and test sets', self.out_log, self.global_log)
        arrays_sets = (X, y)
        # if the user provided weights, split them together with X and y
        if self.weight:
            arrays_sets = arrays_sets + (w,)
            X_train, X_test, y_train, y_test, w_train, w_test = train_test_split(*arrays_sets, test_size=self.test_size, random_state=self.random_state_train_test)
        else:
            X_train, X_test, y_train, y_test = train_test_split(*arrays_sets, test_size=self.test_size, random_state=self.random_state_train_test)

        # scale dataset
        if self.scale:
            fu.log('Scaling dataset', self.out_log, self.global_log)
            scaler = StandardScaler()
            X_train = scaler.fit_transform(X_train)

        # training and getting accuracy for each K
        fu.log('Training dataset applying k neighbors classification from 1 to %d n_neighbors' % self.max_neighbors, self.out_log, self.global_log)
        neighbors = np.arange(1, self.max_neighbors + 1)
        train_accuracy = np.empty(len(neighbors))
        test_accuracy = np.empty(len(neighbors))
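        # standard error of the test accuracy for each K (drawn as the shaded band in the plot)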
        std_acc = np.zeros(self.max_neighbors)

        # scale the test set with the scaler already fitted on the training set
        if self.scale:
            X_test = scaler.transform(X_test)

        for i, k in enumerate(neighbors):
            # Setup a knn classifier with k neighbors
            model = KNeighborsClassifier(n_neighbors=k)
            # Fit the model
            arrays_fit = (X_train, y_train)
            # if the user provided weights
            if self.weight:
                arrays_fit = arrays_fit + (w_train,)
            model.fit(*arrays_fit)
            # Compute accuracy on the training set
            train_accuracy[i] = model.score(X_train, y_train)
            # Compute accuracy on the test set
            test_accuracy[i] = model.score(X_test, y_test)
            # standard error of the test accuracy for this K
            yhat_test = model.predict(X_test)
            std_acc[i] = np.std(yhat_test == y_test) / np.sqrt(yhat_test.shape[0])

        # best K / best accuracy
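        # argmax returns a 0-based position, so add 1 to map it back to the corresponding K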
        best_k = test_accuracy.argmax() + 1
        best_accuracy = test_accuracy.max()

        # accuracy table
        test_table_accuracy = pd.DataFrame(data={'K': np.arange(1, self.max_neighbors + 1), 'accuracy': test_accuracy})
        fu.log('Calculating accuracy for each K\n\nACCURACY\n\n%s\n' % test_table_accuracy.to_string(index=False), self.out_log, self.global_log)

        # classification report for the best K (re-fit the classifier with the best number of neighbors)
        model = KNeighborsClassifier(n_neighbors=best_k)
        model.fit(*arrays_fit)
        cr_test = classification_report(y_test, model.predict(X_test))
        # log loss
        yhat_prob = model.predict_proba(X_test)
        l_loss = log_loss(y_test, yhat_prob)
        fu.log('Calculating report for testing dataset and best K = %d | accuracy = %.3f\n\nCLASSIFICATION REPORT\n\n%s\nLog loss: %.3f\n' % (best_k, best_accuracy, cr_test, l_loss), self.out_log, self.global_log)

        fu.log('Saving results to %s' % self.io_dict["out"]["output_results_path"], self.out_log, self.global_log)
        test_table_accuracy.to_csv(self.io_dict["out"]["output_results_path"], index=False, header=True, float_format='%.3f')

        # accuracy plot
        if self.io_dict["out"]["output_plot_path"]:
            fu.log('Saving accuracy plot to %s' % self.io_dict["out"]["output_plot_path"], self.out_log, self.global_log)
            plt.title('k-NN Varying number of neighbors')
            plt.fill_between(range(1, self.max_neighbors + 1), test_accuracy - std_acc, test_accuracy + std_acc, alpha=0.10)
            plt.plot(neighbors, train_accuracy)
            plt.plot(neighbors, test_accuracy)
            plt.axvline(x=best_k, c='red')
            # legend entries follow the drawing order: band, training curve, testing curve, best K line
            plt.legend(('+/- 1 std', 'Training accuracy', 'Testing accuracy', 'Best K'))
            plt.xlabel('Number of neighbors')
            plt.ylabel('Accuracy')
            plt.tight_layout()
            plt.savefig(self.io_dict["out"]["output_plot_path"], dpi=150)

        # Copy files to host
        self.copy_to_host()

        self.tmp_files.extend([
            self.stage_io_dict.get("unique_dir")
        ])
        self.remove_tmp_files()

        self.check_arguments(output_files_created=True, raise_exception=False)

        return 0


def k_neighbors_coefficient(input_dataset_path: str, output_results_path: str, output_plot_path: str = None, properties: dict = None, **kwargs) -> int:
    """Execute the :class:`KNeighborsCoefficient <classification.k_neighbors_coefficient.KNeighborsCoefficient>` class and
    execute the :meth:`launch() <classification.k_neighbors_coefficient.KNeighborsCoefficient.launch>` method."""

    return KNeighborsCoefficient(input_dataset_path=input_dataset_path,
                                 output_results_path=output_results_path,
                                 output_plot_path=output_plot_path,
                                 properties=properties, **kwargs).launch()


def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(description="Wrapper of the scikit-learn KNeighborsClassifier method.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('--config', required=False, help='Configuration file')

    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')
    required_args.add_argument('--output_results_path', required=True, help='Path to the accuracy values list. Accepted formats: csv.')
    parser.add_argument('--output_plot_path', required=False, help='Path to the accuracy plot. Accepted formats: png.')
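
    # Example invocation (illustrative paths; tool properties are taken from the file passed with --config):
    #   python3 k_neighbors_coefficient.py --config config.yml
    #     --input_dataset_path dataset_k_neighbors_coefficient.csv
    #     --output_results_path output_results.csv
    #     --output_plot_path output_plot.png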
    args = parser.parse_args()
    args.config = args.config or "{}"
    properties = settings.ConfReader(config=args.config).get_prop_dic()

    # Specific call of each building block
    k_neighbors_coefficient(input_dataset_path=args.input_dataset_path,
                            output_results_path=args.output_results_path,
                            output_plot_path=args.output_plot_path,
                            properties=properties)


if __name__ == '__main__':
    main()