Spaces: Sleeping
Commit 52ff483 · 1 Parent(s): 124003a
Count LLM request and show in UI, minor text changes
Browse files

app.py CHANGED
|
@@ -64,9 +64,10 @@ def chill_out(text):
|
|
| 64 |   <h4>Details:</h4>
| 65 |   <ul>
| 66 |   <li>Critique: {result['critique']}</li>
| 67 | - <li>Faithfulness
| 68 | - <li>Spicy
| 69 | - <li>Overall
| 70 |   </ul>
| 71 |   </div>
| 72 |   """
|
|
|
|
| 64 |   <h4>Details:</h4>
| 65 |   <ul>
| 66 |   <li>Critique: {result['critique']}</li>
| 67 | + <li>Faithfulness score: {result['faithfulness_score']}</li>
| 68 | + <li>Spicy score: {result['spicy_score']}</li>
| 69 | + <li>Overall score: {result['overall_score']}</li>
| 70 | + <li>LLM requests made: {result['request_count']}</li>
| 71 |   </ul>
| 72 |   </div>
| 73 |   """
|
chill.py
CHANGED
|
@@ -41,24 +41,30 @@ Outputs something like this:
|
|
| 41 |   global suggestions
| 42 |   suggestions = []
| 43 |   last_edit = ""
| 44 |   start_time = time.time()
| 45 |
| 46 |
| 47 |   def improve_text_attempt():
| 48 |       global suggestions
| 49 |       replacements = {
| 50 |           "original_text": json.dumps(original_text),
| 51 |           "previous_suggestions": json.dumps(suggestions, indent=2),
| 52 |       }
| 53 |       resp_json = query_ai_prompt(improve_prompt, replacements, ImprovedText)
| 54 |       return resp_json["text"]
| 55 |
| 56 |
| 57 |   def critique_text(last_edit):
| 58 |       replacements = {"original_text": original_text, "last_edit": last_edit}
| 59 |
| 60 |       # Query the AI for each of the new prompts separately
| 61 |
| 62 |       critique_resp = query_ai_prompt(critique_prompt, replacements, Critique)
| 63 |       faithfulness_resp = query_ai_prompt(
| 64 |           faith_scorer_prompt, replacements, FaithfulnessScore
|
@@ -101,6 +107,7 @@ def update_suggestions(critique_dict):
|
|
| 101 |   suggestions = sorted(suggestions, key=lambda x: x["overall_score"], reverse=True)[
| 102 |       :2
| 103 |   ]
| 104 |
| 105 |
| 106 |   def print_iteration_result(iteration, overall_score, time_used):
|
|
@@ -116,9 +123,11 @@ def improvement_loop(input_text):
|
|
| 116 |   global original_text
| 117 |   global last_edit
| 118 |   global suggestions
| 119 |   global start_time
| 120 |   suggestions = []
| 121 |   last_edit = ""
| 122 |   start_time = time.time()
| 123 |   max_iterations = 6
| 124 |   original_text = input_text
|
|
|
| 41 |   global suggestions
| 42 |   suggestions = []
| 43 |   last_edit = ""
| 44 | + request_count = 0
| 45 |   start_time = time.time()
| 46 |
| 47 |
| 48 |   def improve_text_attempt():
| 49 |       global suggestions
| 50 | +     global request_count
| 51 |       replacements = {
| 52 |           "original_text": json.dumps(original_text),
| 53 |           "previous_suggestions": json.dumps(suggestions, indent=2),
| 54 |       }
| 55 | +     request_count += 1
| 56 |       resp_json = query_ai_prompt(improve_prompt, replacements, ImprovedText)
| 57 |       return resp_json["text"]
| 58 |
| 59 |
| 60 |   def critique_text(last_edit):
| 61 | +     global suggestions
| 62 | +     global request_count
| 63 |       replacements = {"original_text": original_text, "last_edit": last_edit}
| 64 |
| 65 |       # Query the AI for each of the new prompts separately
| 66 |
| 67 | +     request_count += 3
| 68 |       critique_resp = query_ai_prompt(critique_prompt, replacements, Critique)
| 69 |       faithfulness_resp = query_ai_prompt(
| 70 |           faith_scorer_prompt, replacements, FaithfulnessScore

| 107 |   suggestions = sorted(suggestions, key=lambda x: x["overall_score"], reverse=True)[
| 108 |       :2
| 109 |   ]
| 110 | + critique_dict["request_count"] = request_count
| 111 |
| 112 |
| 113 |   def print_iteration_result(iteration, overall_score, time_used):

| 123 |   global original_text
| 124 |   global last_edit
| 125 |   global suggestions
| 126 | + global request_count
| 127 |   global start_time
| 128 |   suggestions = []
| 129 |   last_edit = ""
| 130 | + request_count = 0
| 131 |   start_time = time.time()
| 132 |   max_iterations = 6
| 133 |   original_text = input_text