Coverage for biobb_ml/clustering/k_means_coefficient.py: 85%

93 statements  

coverage.py v7.5.1, created at 2024-05-07 09:39 +0000

#!/usr/bin/env python3

"""Module containing the KMeansCoefficient class and the command line interface."""
import argparse
import pandas as pd
import numpy as np
from biobb_common.generic.biobb_object import BiobbObject
from sklearn.preprocessing import StandardScaler
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.clustering.common import check_input_path, check_output_path, getHeader, getIndependentVars, getIndependentVarsList, hopkins, getWCSS, get_best_K, getGap, getSilhouetthe, plotKmeansTrain


class KMeansCoefficient(BiobbObject):
    """
    | biobb_ml KMeansCoefficient
    | Wrapper of the scikit-learn KMeans method.
    | Clusters a given dataset and calculates the best K coefficient. Visit the `KMeans documentation page <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_ in the sklearn official website for further information.

    Args:
        input_dataset_path (str): Path to the input dataset. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/clustering/dataset_k_means_coefficient.csv>`_. Accepted formats: csv (edam:format_3752).
        output_results_path (str): Table with the WCSS (elbow method), Gap and Silhouette coefficients for each cluster. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/clustering/ref_output_results_k_means_coefficient.csv>`_. Accepted formats: csv (edam:format_3752).
        output_plot_path (str) (Optional): Path to the elbow method and gap statistics plot. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/clustering/ref_output_plot_k_means_coefficient.png>`_. Accepted formats: png (edam:format_3603).
        properties (dict - Python dictionary object containing the tool parameters, not input/output files):
            * **predictors** (*dict*) - ({}) Features or columns from your dataset you want to use for fitting. You can specify either a list of column names from your input dataset, a list of column indexes or a range of column indexes. Formats: { "columns": ["column1", "column2"] } or { "indexes": [0, 2, 3, 10, 11, 17] } or { "range": [[0, 20], [50, 102]] }. In case of multiple formats, the first one will be picked.
            * **max_clusters** (*int*) - (6) [1~100|1] Maximum number of clusters to evaluate.
            * **random_state_method** (*int*) - (5) [1~1000|1] Determines random number generation for centroid initialization.
            * **scale** (*bool*) - (False) Whether or not to scale the input dataset.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.

    Examples:
        This is a use example of how to use the building block from Python::

            from biobb_ml.clustering.k_means_coefficient import k_means_coefficient
            prop = {
                'predictors': {
                    'columns': ['column1', 'column2', 'column3']
                },
                'max_clusters': 3
            }
            k_means_coefficient(input_dataset_path='/path/to/myDataset.csv',
                                output_results_path='/path/to/newTable.csv',
                                output_plot_path='/path/to/newPlot.png',
                                properties=prop)

    Info:
        * wrapped_software:
            * name: scikit-learn KMeans
            * version: >=0.24.2
            * license: BSD 3-Clause
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl

    """

    def __init__(self, input_dataset_path, output_results_path,
                 output_plot_path=None, properties=None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
        super().__init__(properties)
        self.locals_var_dict = locals().copy()

        # Input/Output files
        self.io_dict = {
            "in": {"input_dataset_path": input_dataset_path},
            "out": {"output_results_path": output_results_path, "output_plot_path": output_plot_path}
        }

        # Properties specific for BB
        self.predictors = properties.get('predictors', {})
        self.max_clusters = properties.get('max_clusters', 6)
        self.random_state_method = properties.get('random_state_method', 5)
        self.scale = properties.get('scale', False)
        self.properties = properties

        # Check the properties
        self.check_properties(properties)
        self.check_arguments()

    def check_data_params(self, out_log, err_log):
        """Checks all the input/output paths and parameters."""
        self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", out_log, self.__class__.__name__)
        self.io_dict["out"]["output_results_path"] = check_output_path(self.io_dict["out"]["output_results_path"], "output_results_path", False, out_log, self.__class__.__name__)
        if self.io_dict["out"]["output_plot_path"]:
            self.io_dict["out"]["output_plot_path"] = check_output_path(self.io_dict["out"]["output_plot_path"], "output_plot_path", True, out_log, self.__class__.__name__)

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`KMeansCoefficient <clustering.k_means_coefficient.KMeansCoefficient>` object."""

        # check input/output paths and parameters
        self.check_data_params(self.out_log, self.err_log)

        # Setup Biobb
        if self.check_restart():
            return 0
        self.stage_files()

        # load dataset
        fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
        if 'columns' in self.predictors:
            labels = getHeader(self.io_dict["in"]["input_dataset_path"])
            skiprows = 1
        else:
            labels = None
            skiprows = None
        data = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header=None, sep="\\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)
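        # note: the regular-expression separator above lets the same reader handle whitespace-,
        # semicolon-, colon-, comma- and tab-delimited datasets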

        # the features are the predictors
        predictors = getIndependentVars(self.predictors, data, self.out_log, self.__class__.__name__)
        fu.log('Predictors: [%s]' % (getIndependentVarsList(self.predictors)), self.out_log, self.global_log)

        # Hopkins test
        H = hopkins(predictors)
        fu.log('Performing Hopkins test over dataset. H = %f' % H, self.out_log, self.global_log)
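        # Interpretation note: the Hopkins statistic compares nearest-neighbour distances in the
        # dataset with those of uniformly distributed random points; values close to 1 indicate a
        # clearly clusterable dataset, whereas values around 0.5 suggest essentially random data.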

        # scale dataset
        if self.scale:
            fu.log('Scaling dataset', self.out_log, self.global_log)
            scaler = StandardScaler()
            predictors = scaler.fit_transform(predictors)
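            # StandardScaler centres every feature to zero mean and unit variance so that no single
            # feature dominates the Euclidean distances used by k-means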

        # calculate WCSS for each number of clusters
        fu.log('Calculating the Within-Cluster Sum of Squares (WCSS) for each of the first %d clusters' % self.max_clusters, self.out_log, self.global_log)
        wcss = getWCSS('kmeans', self.max_clusters, predictors)
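        # WCSS is the within-cluster sum of squared distances to the assigned centroid (the quantity
        # scikit-learn exposes as inertia_); the "elbow" where this curve stops dropping sharply is
        # taken as the candidate number of clusters.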

        # WCSS table
        wcss_table = pd.DataFrame(data={'cluster': np.arange(1, self.max_clusters + 1), 'WCSS': wcss})
        fu.log('Calculating WCSS for each cluster\n\nWCSS TABLE\n\n%s\n' % wcss_table.to_string(index=False), self.out_log, self.global_log)

        # get the best cluster count according to the elbow method
        best_k, elbow_index = get_best_K(wcss)
        fu.log('Optimal number of clusters according to the Elbow Method is %d' % best_k, self.out_log, self.global_log)

        # calculate the gap statistic
        best_g, gap = getGap('kmeans', predictors, nrefs=5, maxClusters=(self.max_clusters + 1))
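        # The gap statistic compares log(WCSS) of the clustered data with its expectation under
        # nrefs reference datasets sampled uniformly at random; a larger gap indicates
        # better-defined clusters.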

        # gap table
        gap_table = pd.DataFrame(data={'cluster': np.arange(1, self.max_clusters + 1), 'GAP': gap['gap']})
        fu.log('Calculating Gap for each cluster\n\nGAP TABLE\n\n%s\n' % gap_table.to_string(index=False), self.out_log, self.global_log)

        # log best cluster gap method
        fu.log('Optimal number of clusters according to the Gap Statistics Method is %d' % best_g, self.out_log, self.global_log)

        # calculate silhouette
        silhouette_list, s_list = getSilhouetthe(method='kmeans', X=predictors, max_clusters=self.max_clusters, random_state=self.random_state_method)
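        # The silhouette coefficient (between -1 and 1) measures how well each sample fits its own
        # cluster compared with the nearest neighbouring cluster; the k with the highest mean
        # silhouette is reported below as the optimal number of clusters.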

        # silhouette table
        silhouette_table = pd.DataFrame(data={'cluster': np.arange(1, self.max_clusters + 1), 'SILHOUETTE': silhouette_list})
        fu.log('Calculating Silhouette for each cluster\n\nSILHOUETTE TABLE\n\n%s\n' % silhouette_table.to_string(index=False), self.out_log, self.global_log)

        # get best cluster silhouette method
        key = silhouette_list.index(max(silhouette_list))
        best_s = s_list[key]
        fu.log('Optimal number of clusters according to the Silhouette Method is %d' % best_s, self.out_log, self.global_log)

        # save results table
        results_table = pd.DataFrame(data={'method': ['elbow', 'gap', 'silhouette'], 'coefficient': [wcss[elbow_index], max(gap['gap']), max(silhouette_list)], 'clusters': [best_k, best_g, best_s]})
        fu.log('Gathering results\n\nRESULTS TABLE\n\n%s\n' % results_table.to_string(index=False), self.out_log, self.global_log)
        fu.log('Saving results to %s' % self.io_dict["out"]["output_results_path"], self.out_log, self.global_log)
        results_table.to_csv(self.io_dict["out"]["output_results_path"], index=False, header=True, float_format='%.3f')

        # methods plot
        if self.io_dict["out"]["output_plot_path"]:
            fu.log('Saving methods plot to %s' % self.io_dict["out"]["output_plot_path"], self.out_log, self.global_log)
            plot = plotKmeansTrain(self.max_clusters, wcss, gap['gap'], silhouette_list, best_k, best_g, best_s)
            plot.savefig(self.io_dict["out"]["output_plot_path"], dpi=150)

        # Copy files to host
        self.copy_to_host()

        self.tmp_files.extend([
            self.stage_io_dict.get("unique_dir")
        ])
        self.remove_tmp_files()

        self.check_arguments(output_files_created=True, raise_exception=False)

        return 0


def k_means_coefficient(input_dataset_path: str, output_results_path: str, output_plot_path: str = None, properties: dict = None, **kwargs) -> int:
    """Create the :class:`KMeansCoefficient <clustering.k_means_coefficient.KMeansCoefficient>` class and
    execute the :meth:`launch() <clustering.k_means_coefficient.KMeansCoefficient.launch>` method."""

    return KMeansCoefficient(input_dataset_path=input_dataset_path,
                             output_results_path=output_results_path,
                             output_plot_path=output_plot_path,
                             properties=properties, **kwargs).launch()


def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(description="Wrapper of the scikit-learn KMeans method.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('--config', required=False, help='Configuration file')

    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')
    required_args.add_argument('--output_results_path', required=True, help='Table with WCSS (elbow method), Gap and Silhouette coefficients for each cluster. Accepted formats: csv.')
    parser.add_argument('--output_plot_path', required=False, help='Path to the elbow method and gap statistics plot. Accepted formats: png.')

    args = parser.parse_args()
    args.config = args.config or "{}"
    properties = settings.ConfReader(config=args.config).get_prop_dic()

    # Specific call of each building block
    k_means_coefficient(input_dataset_path=args.input_dataset_path,
                        output_results_path=args.output_results_path,
                        output_plot_path=args.output_plot_path,
                        properties=properties)


if __name__ == '__main__':
    main()
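
For reference, the coefficients gathered in the results table can be reproduced with plain scikit-learn. The sketch below is illustrative only: it uses a synthetic make_blobs dataset and recomputes the WCSS curve and the mean silhouette per k directly, rather than calling the getWCSS/getSilhouetthe helpers imported above from biobb_ml.clustering.common.

import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score

X, _ = make_blobs(n_samples=300, centers=3, random_state=5)
max_clusters = 6

# WCSS (KMeans inertia) for k = 1..max_clusters; the elbow of this curve suggests the best k
wcss = [KMeans(n_clusters=k, random_state=5).fit(X).inertia_ for k in range(1, max_clusters + 1)]

# Mean silhouette coefficient for k = 2..max_clusters (undefined for a single cluster)
silhouettes = {k: silhouette_score(X, KMeans(n_clusters=k, random_state=5).fit_predict(X))
               for k in range(2, max_clusters + 1)}
best_k_silhouette = max(silhouettes, key=silhouettes.get)

print('WCSS per k:', np.round(wcss, 2))
print('Best k by silhouette:', best_k_silhouette)

The Hopkins value logged at the start of launch() can be approximated in the same spirit. The function below is a generic sketch of the Hopkins statistic, assuming only a numeric 2-D array X; it is not the hopkins helper actually imported from biobb_ml.clustering.common.

import numpy as np
from sklearn.neighbors import NearestNeighbors

def hopkins_sketch(X, sample_ratio=0.1, seed=0):
    """Hopkins statistic: ~0.5 for uniformly random data, close to 1 for clustered data."""
    rng = np.random.default_rng(seed)
    X = np.asarray(X, dtype=float)
    n, d = X.shape
    m = max(1, int(sample_ratio * n))
    nn = NearestNeighbors(n_neighbors=2).fit(X)

    # u_i: distance from m uniform random points (inside the data bounding box) to the nearest sample
    uniform_points = rng.uniform(X.min(axis=0), X.max(axis=0), size=(m, d))
    u_dist, _ = nn.kneighbors(uniform_points, n_neighbors=1)

    # w_i: distance from m sampled data points to their nearest *other* sample
    sample_idx = rng.choice(n, size=m, replace=False)
    w_dist, _ = nn.kneighbors(X[sample_idx], n_neighbors=2)  # first neighbour is the point itself

    u, w = u_dist.sum(), w_dist[:, 1].sum()
    return u / (u + w)

Applied to the well-separated blobs generated above, hopkins_sketch(X) returns a value close to 1, matching the "clearly clusterable" interpretation logged by the building block.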