Coverage for biobb_ml/clustering/dbscan.py: 84%
96 statements
coverage.py v7.6.1, created at 2024-10-03 14:57 +0000
#!/usr/bin/env python3

"""Module containing the DBSCANClustering class and the command line interface."""
import argparse
import pandas as pd
from biobb_common.generic.biobb_object import BiobbObject
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.clustering.common import check_input_path, check_output_path, getHeader, getIndependentVars, getIndependentVarsList, hopkins, plotCluster


class DBSCANClustering(BiobbObject):
    """
    | biobb_ml DBSCANClustering
    | Wrapper of the scikit-learn DBSCAN method.
    | Clusters a given dataset. Visit the `DBSCAN documentation page <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html>`_ in the sklearn official website for further information.

    Args:
        input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/clustering/dataset_dbscan.csv>`_. Accepted formats: csv (edam:format_3752).
        output_results_path (str): Path to the clustered dataset. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/clustering/ref_output_results_dbscan.csv>`_. Accepted formats: csv (edam:format_3752).
        output_plot_path (str) (Optional): Path to the clustering plot. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/clustering/ref_output_plot_dbscan.png>`_. Accepted formats: png (edam:format_3603).
        properties (dict - Python dictionary object containing the tool parameters, not input/output files):
            * **predictors** (*dict*) - ({}) Features or columns from your dataset you want to use for fitting. You can specify either a list of column names from your input dataset, a list of column indexes or a range of column indexes. Formats: { "columns": ["column1", "column2"] } or { "indexes": [0, 2, 3, 10, 11, 17] } or { "range": [[0, 20], [50, 102]] }. In case of multiple formats, the first one will be picked.
            * **eps** (*float*) - (0.5) [0~10|0.1] The maximum distance between two samples for one to be considered as in the neighborhood of the other.
            * **min_samples** (*int*) - (5) [1~100|1] The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself.
            * **metric** (*str*) - ("euclidean") The metric to use when calculating distance between instances in a feature array. Values: cityblock (Compute the City Block -Manhattan- distance), cosine (Compute the Cosine distance between 1-D arrays), euclidean (Compute the Euclidean distance between two 1-D arrays), l1, l2, manhattan (Compute the Manhattan distance), braycurtis (Compute the Bray-Curtis distance between two 1-D arrays), canberra (Compute the Canberra distance between two 1-D arrays), chebyshev (Compute the Chebyshev distance), correlation (Compute the correlation distance between two 1-D arrays), dice (Compute the Dice dissimilarity between two boolean 1-D arrays), hamming (Compute the Hamming distance between two 1-D arrays), jaccard (Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays), kulsinski (Compute the Kulsinski dissimilarity between two boolean 1-D arrays), mahalanobis (Compute the Mahalanobis distance between two 1-D arrays), minkowski (Compute the Minkowski distance between two 1-D arrays), rogerstanimoto (Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays), russellrao (Compute the Russell-Rao dissimilarity between two boolean 1-D arrays), seuclidean (Return the standardized Euclidean distance between two 1-D arrays), sokalmichener (Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays), sokalsneath (Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays), sqeuclidean (Compute the squared Euclidean distance between two 1-D arrays), yule (Compute the Yule dissimilarity between two boolean 1-D arrays).
            * **plots** (*list*) - (None) List of dictionaries with all plots you want to generate. Only 2D or 3D plots accepted. Format: [ { 'title': 'Plot 1', 'features': ['feat1', 'feat2'] } ].
            * **scale** (*bool*) - (False) Whether or not to scale the input dataset.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporary files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
            * **sandbox_path** (*str*) - ("./") [WF property] Parent path to the sandbox directory.

    Examples:
        This is an example of how to use the building block from Python::

            from biobb_ml.clustering.dbscan import dbscan
            prop = {
                'predictors': {
                    'columns': [ 'column1', 'column2', 'column3' ]
                },
                'eps': 1.4,
                'min_samples': 3,
                'metric': 'euclidean',
                'plots': [
                    {
                        'title': 'Plot 1',
                        'features': ['feat1', 'feat2']
                    }
                ]
            }
            dbscan(input_dataset_path='/path/to/myDataset.csv',
                   output_results_path='/path/to/newTable.csv',
                   output_plot_path='/path/to/newPlot.png',
                   properties=prop)

    Info:
        * wrapped_software:
            * name: scikit-learn DBSCAN
            * version: >=0.24.2
            * license: BSD 3-Clause
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl

    """

    def __init__(self, input_dataset_path, output_results_path,
                 output_plot_path=None, properties=None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
        super().__init__(properties)
        self.locals_var_dict = locals().copy()

        # Input/Output files
        self.io_dict = {
            "in": {"input_dataset_path": input_dataset_path},
            "out": {"output_results_path": output_results_path, "output_plot_path": output_plot_path}
        }

        # Properties specific for BB
        self.predictors = properties.get('predictors', {})
        self.eps = properties.get('eps', .5)
        self.min_samples = properties.get('min_samples', 5)
        self.metric = properties.get('metric', 'euclidean')
        self.plots = properties.get('plots', [])
        self.scale = properties.get('scale', False)
        self.properties = properties

        # Check the properties
        self.check_properties(properties)
        self.check_arguments()

    def check_data_params(self, out_log, err_log):
        """ Checks all the input/output paths and parameters """
        self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", out_log, self.__class__.__name__)
        self.io_dict["out"]["output_results_path"] = check_output_path(self.io_dict["out"]["output_results_path"], "output_results_path", False, out_log, self.__class__.__name__)
        if self.io_dict["out"]["output_plot_path"]:
            self.io_dict["out"]["output_plot_path"] = check_output_path(self.io_dict["out"]["output_plot_path"], "output_plot_path", True, out_log, self.__class__.__name__)

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`DBSCANClustering <clustering.dbscan.DBSCANClustering>` object."""

        # check input/output paths and parameters
        self.check_data_params(self.out_log, self.err_log)

        # Setup Biobb
        if self.check_restart():
            return 0
        self.stage_files()

        # load dataset
        fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
        if 'columns' in self.predictors:
            labels = getHeader(self.io_dict["in"]["input_dataset_path"])
            skiprows = 1
        else:
            labels = None
            skiprows = None
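        # note: the regex separator below lets pandas transparently parse files
        # delimited by whitespace, semicolons, colons, commas or tabs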
        data = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header=None, sep="\\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)

        # the features are the predictors
        predictors = getIndependentVars(self.predictors, data, self.out_log, self.__class__.__name__)
        fu.log('Predictors: [%s]' % (getIndependentVarsList(self.predictors)), self.out_log, self.global_log)

        # Hopkins test
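        # (as a rule of thumb, a Hopkins statistic close to 1 suggests the data
        # has a clustering tendency, while a value near 0.5 suggests random data)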
        H = hopkins(predictors)
        fu.log('Performing Hopkins test over dataset. H = %f' % H, self.out_log, self.global_log)

        # scale dataset
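        # (scaling matters for DBSCAN: eps is an absolute distance, so features on
        # very different scales can otherwise dominate the neighborhood computation)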
        if self.scale:
            fu.log('Scaling dataset', self.out_log, self.global_log)
            scaler = StandardScaler()
            predictors = scaler.fit_transform(predictors)

        # create a DBSCAN model with the given eps, min_samples and metric
        # (unlike KMeans, DBSCAN does not take a number of clusters)
        model = DBSCAN(eps=self.eps, min_samples=self.min_samples, metric=self.metric)

        # create a copy of data, so we can see the clusters next to the original data
        clusters = data.copy()
        # fit the model and assign a cluster label to each observation
        clusters['cluster'] = model.fit_predict(predictors)

        fu.log('Calculating results\n\nCLUSTERING TABLE\n\n%s\n' % clusters, self.out_log, self.global_log)

        # DBSCAN labels noise points as -1; discard them when counting clusters
        clstrs = set(clusters['cluster'])
        if -1 in clstrs:
            clstrs.remove(-1)
        fu.log('Total of clusters computed by DBSCAN = %d' % len(clstrs), self.out_log, self.global_log)

        # count the noise points and report them as outliers
        outliers = clusters['cluster'].tolist().count(-1)
        op = (outliers / len(clusters['cluster'].tolist())) * 100
        fu.log('Total of outliers = %d (%.2f%%)' % (outliers, op), self.out_log, self.global_log)

        # save results
        fu.log('Saving results to %s' % self.io_dict["out"]["output_results_path"], self.out_log, self.global_log)
        clusters.to_csv(self.io_dict["out"]["output_results_path"], index=False, header=True, float_format='%.3f')

        if self.io_dict["out"]["output_plot_path"] and self.plots:
            # keep only 2D or 3D plots, up to a maximum of six
            new_plots = []
            i = 0
            for plot in self.plots:
                if len(plot['features']) == 2 or len(plot['features']) == 3:
                    new_plots.append(plot)
                    i += 1
                if i == 6:
                    break

            plot = plotCluster(new_plots, clusters)
            fu.log('Saving output plot to %s' % self.io_dict["out"]["output_plot_path"], self.out_log, self.global_log)
            plot.savefig(self.io_dict["out"]["output_plot_path"], dpi=150)

        # Copy files to host
        self.copy_to_host()

        self.tmp_files.extend([
            self.stage_io_dict.get("unique_dir")
        ])
        self.remove_tmp_files()

        self.check_arguments(output_files_created=True, raise_exception=False)

        return 0


def dbscan(input_dataset_path: str, output_results_path: str, output_plot_path: str = None, properties: dict = None, **kwargs) -> int:
    """Create the :class:`DBSCANClustering <clustering.dbscan.DBSCANClustering>` class and
    execute the :meth:`launch() <clustering.dbscan.DBSCANClustering.launch>` method."""

    return DBSCANClustering(input_dataset_path=input_dataset_path,
                            output_results_path=output_results_path,
                            output_plot_path=output_plot_path,
                            properties=properties, **kwargs).launch()


def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(description="Wrapper of the scikit-learn DBSCAN method.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('--config', required=False, help='Configuration file')

    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')
    required_args.add_argument('--output_results_path', required=True, help='Path to the clustered dataset. Accepted formats: csv.')
    parser.add_argument('--output_plot_path', required=False, help='Path to the clustering plot. Accepted formats: png.')

    args = parser.parse_args()
    args.config = args.config or "{}"
    properties = settings.ConfReader(config=args.config).get_prop_dic()

    # Specific call of each building block
    dbscan(input_dataset_path=args.input_dataset_path,
           output_results_path=args.output_results_path,
           output_plot_path=args.output_plot_path,
           properties=properties)


if __name__ == '__main__':
    main()
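
# Example command line invocation (file names are illustrative, not shipped defaults):
#   python -m biobb_ml.clustering.dbscan --config config_dbscan.yml \
#          --input_dataset_path dataset_dbscan.csv \
#          --output_results_path output_results.csv \
#          --output_plot_path output_plot.png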