Update src/bin/target_family_classifier.py
src/bin/target_family_classifier.py CHANGED

@@ -96,7 +96,7 @@ def score_protein_rep(dataset):
     train_index = train_index.dropna(axis=1)
     test_index = test_index.dropna(axis=1)

-    conf_matrices = []
+    #conf_matrices = []

     print('Producing protein family predictions...\n')
     for i in tqdm(range(10)):
@@ -122,29 +122,24 @@ def score_protein_rep(dataset):
         ac = accuracy_score(test_y, y_pred)
         accuracy.append(ac)

-        c_report = classification_report(test_y, y_pred, target_names=target_names, output_dict=True)
-        c_matrix = confusion_matrix(test_y, y_pred, labels=labels)
-        conf_matrices.append(c_matrix)
+        #c_report = classification_report(test_y, y_pred, target_names=target_names, output_dict=True)
+        #c_matrix = confusion_matrix(test_y, y_pred, labels=labels)
+        #conf_matrices.append(c_matrix)

-        class_report = class_based_scores(c_report, c_matrix)
+        #class_report = class_based_scores(c_report, c_matrix)
         mcc_score = matthews_corrcoef(test_y, y_pred)
         mcc.append(mcc_score)

-        report_list.append(class_report)
+        #report_list.append(class_report)

-    f1_perclass = pd.concat([r['f1-score'] for r in report_list], axis=1)
-    ac_perclass = pd.concat([r['accuracy'] for r in report_list], axis=1)
-    mcc_perclass = pd.concat([r['mcc'] for r in report_list], axis=1)
+    #f1_perclass = pd.concat([r['f1-score'] for r in report_list], axis=1)
+    #ac_perclass = pd.concat([r['accuracy'] for r in report_list], axis=1)
+    #mcc_perclass = pd.concat([r['mcc'] for r in report_list], axis=1)

     results = {
         "f1": f1,
         "accuracy": accuracy,
         "mcc": mcc,
-        "confusion_matrices": conf_matrices,
-        "class_reports": report_list,
-        "f1_per_class": f1_perclass,
-        "accuracy_per_class": ac_perclass,
-        "mcc_per_class": mcc_perclass
     }

     return results
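In effect, the commit comments out the per-class reporting path in score_protein_rep (classification_report, confusion_matrix, class_based_scores, and the per-class concatenations) and drops the corresponding keys from the results dict, so the function now returns only the run-level "f1", "accuracy", and "mcc" lists. Below is a minimal, hypothetical usage sketch of that trimmed interface; it assumes each of the three keys holds one score per repetition, that numpy is installed, and that the module is importable — the dataset name and the summarising step are illustrative and not part of the script itself.

# Hypothetical usage sketch (not part of this commit): summarise the
# 10 repetitions returned by the trimmed-down score_protein_rep().
import numpy as np
from target_family_classifier import score_protein_rep  # assumes src/bin is on the import path

results = score_protein_rep("some_representation")  # dataset name here is purely illustrative

for metric in ("f1", "accuracy", "mcc"):
    scores = np.asarray(results[metric], dtype=float)  # one score per repetition
    print(f"{metric}: mean={scores.mean():.3f}, std={scores.std():.3f}, n={len(scores)}")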