Spaces:
Sleeping
Sleeping
change data
Browse files
app.py
CHANGED
|
@@ -33,7 +33,7 @@ summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
| 33 |
| 34 |
| 35 |
| 36 | - def split_into_token_chunks(text: str, max_tokens: int =   [line truncated in page capture]
| 37 |   """
| 38 |   Splits a long string into chunks of a specified maximum number of tokens (words).
| 39 |
@@ -88,7 +88,7 @@ async def ppt_content(data):
| 88 |   # print(f"Chunk {i}:\n{chunk}\n")
| 89 |
| 90 |
| 91 | - result = agent.run_sync(user_prompt = f"Create me a powerpoint presentation   [line truncated in page capture]
| 92 |   message_history = message_history,
| 93 |   deps=deps,
| 94 |   )
|
@@ -110,7 +110,7 @@ def ai_ppt(data):
| 110 |   summary_texts = []
| 111 |   listOfString = split_into_token_chunks("".join(data))
| 112 |   for x in listOfString:
| 113 | - summary = summarizer("".join(data), max_length=   [line truncated in page capture]
| 114 |   summary_texts .append([item['summary_text'] for item in summary])
| 115 |   print(summary_texts)
| 116 |
|
|
| 33 |
| 34 |
| 35 |
| 36 | + def split_into_token_chunks(text: str, max_tokens: int = 6000) -> list:
| 37 |   """
| 38 |   Splits a long string into chunks of a specified maximum number of tokens (words).
| 39 |
|
|
|
| 88 |   # print(f"Chunk {i}:\n{chunk}\n")
| 89 |
| 90 |
| 91 | + result = agent.run_sync(user_prompt = f"Create me a powerpoint presentation" + "".join(data),
| 92 |   message_history = message_history,
| 93 |   deps=deps,
| 94 |   )
|
|
|
|
| 110 |   summary_texts = []
| 111 |   listOfString = split_into_token_chunks("".join(data))
| 112 |   for x in listOfString:
| 113 | + summary = summarizer("".join(data), max_length=700, min_length=100, truncation=True,do_sample=False)
| 114 |   summary_texts .append([item['summary_text'] for item in summary])
| 115 |   print(summary_texts)
| 116 |