Update app.py
app.py
CHANGED
@@ -110,15 +110,27 @@ def classify_toxicity(audio_file, classify_anxiety, emo_class, explitive_selecti
     # formatted_classification_output = "\n".join([f"{key}: {value}" for key, value in classification_output.items()])
     # label_score_pairs = [(label, score) for label, score in zip(classification_output['labels'], classification_output['scores'])]
     label_score_dict = {label: score for label, score in zip(classification_output['labels'], classification_output['scores'])}
-
-
-    if
-
-
+    k = max(label_score_dict, key=label_score_dict.get)
+    maxval = label_score_dict[k]
+    if maxval > tox_score:
+        if maxval > threshold:
+            print("Toxic")
+            affirm = positive_affirmations()
+            topScore = maxval
+        else:
+            print("Not Toxic")
+            affirm = ""
+            topScore = maxval
     else:
+        if tox_score > threshold:
+            affirm = positive_affirmations()
+            topScore = toxicity_score
+        else:
+            print("Not Toxic")
+            affirm = ""
+            topScore = toxicity_score
+
+    return transcribed_text, topScore, label_score_dict, affirm
     # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
 
 def positive_affirmations():
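
For reference, a self-contained sketch of the logic this commit adds. It assumes classification_output is a zero-shot-classification pipeline result with 'labels' and 'scores' keys, and that toxicity_score, threshold, and transcribed_text are computed earlier in classify_toxicity; the helper body and all sample values below are invented for illustration.

# Sketch of the committed branching logic; names mirror the diff, inputs
# stand in for values computed earlier in classify_toxicity.

def positive_affirmations():
    # Placeholder for the app's affirmation helper defined later in app.py.
    return "Take a breath. You are more than this moment."

def score_and_affirm(classification_output, toxicity_score, threshold, transcribed_text):
    # Map each zero-shot label to its score.
    label_score_dict = {label: score for label, score in
                        zip(classification_output['labels'], classification_output['scores'])}
    # max() with key= returns the label whose score is highest.
    k = max(label_score_dict, key=label_score_dict.get)
    maxval = label_score_dict[k]

    if maxval > toxicity_score:
        # The zero-shot classifier is more confident than the toxicity model.
        affirm = positive_affirmations() if maxval > threshold else ""
        top_score = maxval
    else:
        affirm = positive_affirmations() if toxicity_score > threshold else ""
        top_score = toxicity_score
    return transcribed_text, top_score, label_score_dict, affirm

# Example call with fabricated scores:
result = score_and_affirm(
    {'labels': ['insult', 'threat', 'neutral'], 'scores': [0.72, 0.10, 0.18]},
    toxicity_score=0.35,
    threshold=0.50,
    transcribed_text="example transcript",
)
print(result[1], result[3])  # 0.72, plus the affirmation string

Returning label_score_dict alongside topScore presumably lets the Gradio interface show the full label distribution while the single top score drives the headline result and the affirmation message.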