Coverage for biobb_ml/clustering/clustering_predict.py: 74% (89 statements)

coverage.py v7.6.1, created at 2024-10-03 14:57 +0000

#!/usr/bin/env python3

"""Module containing the ClusteringPredict class and the command line interface."""
import argparse
import pandas as pd
import joblib
from biobb_common.generic.biobb_object import BiobbObject
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_ml.clustering.common import check_input_path, check_output_path, getHeader, get_list_of_predictors, get_keys_of_predictors

class ClusteringPredict(BiobbObject):
    """
    | biobb_ml ClusteringPredict
    | Makes predictions from an input dataset and a given clustering model.
    | Makes predictions from an input dataset (provided either as a file or as a dictionary property) and a given clustering model fitted with the `KMeans <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_ method.

    Args:
        input_model_path (str): Path to the input model. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/clustering/model_clustering_predict.pkl>`_. Accepted formats: pkl (edam:format_3653).
        input_dataset_path (str) (Optional): Path to the dataset to predict. File type: input. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/data/clustering/input_clustering_predict.csv>`_. Accepted formats: csv (edam:format_3752).
        output_results_path (str): Path to the output results file. File type: output. `Sample file <https://github.com/bioexcel/biobb_ml/raw/master/biobb_ml/test/reference/clustering/ref_output_results_clustering_predict.csv>`_. Accepted formats: csv (edam:format_3752).
        properties (dict - Python dictionary object containing the tool parameters, not input/output files):
            * **predictions** (*list*) - (None) List of dictionaries with all the values whose cluster you want to predict. Only taken into account if **input_dataset_path** is not provided. Format: [{ 'var1': 1.0, 'var2': 2.0 }, { 'var1': 4.0, 'var2': 2.7 }] for datasets with headers and [[ 1.0, 2.0 ], [ 4.0, 2.7 ]] for datasets without headers.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporary files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
            * **sandbox_path** (*str*) - ("./") [WF property] Parent path to the sandbox directory.

    Examples:
        This is an example of how to use the building block from Python::

            from biobb_ml.clustering.clustering_predict import clustering_predict
            prop = {
                'predictions': [
                    {
                        'var1': 1.0,
                        'var2': 2.0
                    },
                    {
                        'var1': 4.0,
                        'var2': 2.7
                    }
                ]
            }
            clustering_predict(input_model_path='/path/to/myModel.pkl',
                               output_results_path='/path/to/newPredictedResults.csv',
                               input_dataset_path='/path/to/myDataset.csv',
                               properties=prop)

    Info:
        * wrapped_software:
            * name: scikit-learn
            * version: >=0.24.2
            * license: BSD 3-Clause
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl

    """

    def __init__(self, input_model_path, output_results_path,
                 input_dataset_path=None, properties=None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
        super().__init__(properties)
        self.locals_var_dict = locals().copy()

        # Input/Output files
        self.io_dict = {
            "in": {"input_model_path": input_model_path, "input_dataset_path": input_dataset_path},
            "out": {"output_results_path": output_results_path}
        }

        # Properties specific for BB
        self.predictions = properties.get('predictions', [])
        self.properties = properties

        # Check the properties
        self.check_properties(properties)
        self.check_arguments()

    def check_data_params(self, out_log, err_log):
        """ Checks all the input/output paths and parameters """
        self.io_dict["in"]["input_model_path"] = check_input_path(self.io_dict["in"]["input_model_path"], "input_model_path", out_log, self.__class__.__name__)
        self.io_dict["out"]["output_results_path"] = check_output_path(self.io_dict["out"]["output_results_path"], "output_results_path", False, out_log, self.__class__.__name__)
        if self.io_dict["in"]["input_dataset_path"]:
            self.io_dict["in"]["input_dataset_path"] = check_input_path(self.io_dict["in"]["input_dataset_path"], "input_dataset_path", out_log, self.__class__.__name__)

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`ClusteringPredict <clustering.clustering_predict.ClusteringPredict>` object."""

        # check input/output paths and parameters
        self.check_data_params(self.out_log, self.err_log)

        # Setup Biobb
        if self.check_restart():
            return 0
        self.stage_files()

        fu.log('Getting model from %s' % self.io_dict["in"]["input_model_path"], self.out_log, self.global_log)

        with open(self.io_dict["in"]["input_model_path"], "rb") as f:
            while True:
                try:
                    m = joblib.load(f)
                    if isinstance(m, KMeans):
                        new_model = m
                    if isinstance(m, StandardScaler):
                        scaler = m
                    if isinstance(m, dict):
                        variables = m
                except EOFError:
                    break
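
        # NOTE (assumption): the model file is expected to contain several
        # objects dumped sequentially with joblib: the fitted KMeans
        # estimator, an optional StandardScaler and a dict of training
        # metadata (keys such as 'predictors' and 'scale'), presumably in
        # the layout written by the companion k-means training building
        # block. If one of these is missing, the corresponding name above
        # stays unbound and the code below raises a NameError.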

        if self.io_dict["in"]["input_dataset_path"]:
            # load dataset from input_dataset_path file
            fu.log('Getting dataset from %s' % self.io_dict["in"]["input_dataset_path"], self.out_log, self.global_log)
            if 'columns' in variables['predictors']:
                labels = getHeader(self.io_dict["in"]["input_dataset_path"])
                skiprows = 1
            else:
                labels = None
                skiprows = None
            # the separator is a regular expression accepting whitespace, semicolons, colons, commas or tabs
            new_data_table = pd.read_csv(self.io_dict["in"]["input_dataset_path"], header=None, sep="\\s+|;|:|,|\t", engine="python", skiprows=skiprows, names=labels)
        else:
            # load dataset from properties
            if 'columns' in variables['predictors']:
                # sort each prediction dict into the column order stored at training time in variables['predictors']['columns']
                index_map = {v: i for i, v in enumerate(variables['predictors']['columns'])}
                predictions = []
                for pred in self.predictions:
                    sorted_pred = sorted(pred.items(), key=lambda pair: index_map[pair[0]])
                    predictions.append(dict(sorted_pred))
                new_data_table = pd.DataFrame(data=get_list_of_predictors(predictions), columns=get_keys_of_predictors(predictions))
            else:
                predictions = self.predictions
                new_data_table = pd.DataFrame(data=predictions)
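
        # NOTE: plain dicts preserve insertion order in Python 3.7+, so the
        # sort above is enough to keep the predictor columns aligned with
        # the feature order the model was trained on.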

        if variables['scale']:
            fu.log('Scaling dataset', self.out_log, self.global_log)
            new_data = scaler.transform(new_data_table)
        else:
            new_data = new_data_table

        p = new_model.predict(new_data)

        new_data_table['cluster'] = p
        fu.log('Predicting results\n\nPREDICTION RESULTS\n\n%s\n' % new_data_table, self.out_log, self.global_log)
        fu.log('Saving results to %s' % self.io_dict["out"]["output_results_path"], self.out_log, self.global_log)
        new_data_table.to_csv(self.io_dict["out"]["output_results_path"], index=False, header=True, float_format='%.3f')
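
        # float_format='%.3f' rounds every float column to three decimals in
        # the saved CSV; the in-memory table keeps full precision.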

        # Copy files to host
        self.copy_to_host()

        self.tmp_files.extend([
            self.stage_io_dict.get("unique_dir")
        ])
        self.remove_tmp_files()

        self.check_arguments(output_files_created=True, raise_exception=False)

        return 0


def clustering_predict(input_model_path: str, output_results_path: str, input_dataset_path: str = None, properties: dict = None, **kwargs) -> int:
    """Execute the :class:`ClusteringPredict <clustering.clustering_predict.ClusteringPredict>` class and
    execute the :meth:`launch() <clustering.clustering_predict.ClusteringPredict.launch>` method."""

    return ClusteringPredict(input_model_path=input_model_path,
                             output_results_path=output_results_path,
                             input_dataset_path=input_dataset_path,
                             properties=properties, **kwargs).launch()
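
# A minimal command-line sketch (file names are illustrative; the
# `clustering_predict` console entry point is assumed to be registered when
# the package is installed):
#
#   clustering_predict --input_model_path model_clustering_predict.pkl \
#                      --input_dataset_path input_clustering_predict.csv \
#                      --output_results_path output_results.csv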


def main():
    """Command line execution of this building block. Please check the command line documentation."""
    parser = argparse.ArgumentParser(description="Makes predictions from an input dataset and a given clustering model.", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    parser.add_argument('--config', required=False, help='Configuration file')

    # Specific args of each building block
    required_args = parser.add_argument_group('required arguments')
    required_args.add_argument('--input_model_path', required=True, help='Path to the input model. Accepted formats: pkl.')
    required_args.add_argument('--output_results_path', required=True, help='Path to the output results file. Accepted formats: csv.')
    parser.add_argument('--input_dataset_path', required=False, help='Path to the dataset to predict. Accepted formats: csv.')

    args = parser.parse_args()
    args.config = args.config or "{}"
    properties = settings.ConfReader(config=args.config).get_prop_dic()

    # Specific call of each building block
    clustering_predict(input_model_path=args.input_model_path,
                       output_results_path=args.output_results_path,
                       input_dataset_path=args.input_dataset_path,
                       properties=properties)


if __name__ == '__main__':
    main()