SandaAbhishekSagar committed
Commit c68a9eb · Parent: ab3ad5b
revamped code

Files changed:
- image_generator.py  +2 -2
- translate.py  +2 -1
image_generator.py CHANGED
@@ -30,14 +30,14 @@
 # prompt = "A friendly person saying 'How are you?'"
 # print("Generated Image Path:", generate_image(prompt))
 
-
+import spaces
 from diffusers import StableDiffusionPipeline
 import torch
 
 # Preload the model globally
 device = "cuda" if torch.cuda.is_available() else "cpu"
 pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to(device)
-
+@spaces.GPU
 def generate_image(prompt):
     """Generate an image based on the input prompt."""
     with torch.no_grad():
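Note on this change: the spaces package is the Hugging Face Spaces helper used on ZeroGPU hardware, and the @spaces.GPU decorator attaches a GPU only for the duration of the decorated call. Below is a minimal sketch of what generate_image plausibly looks like after this commit; only the import, the preloaded pipeline, and the decorator are confirmed by the diff, while the image extraction, output filename, and return value are assumptions.

# Hypothetical sketch of image_generator.py after this commit (not the
# author's full file): the body below torch.no_grad() and the output
# path are assumptions, not part of the diff.
import spaces
import torch
from diffusers import StableDiffusionPipeline

# Preload the model globally so it is loaded once per process
device = "cuda" if torch.cuda.is_available() else "cpu"
pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5"
).to(device)

@spaces.GPU  # on ZeroGPU Spaces, a GPU is attached only while this call runs
def generate_image(prompt):
    """Generate an image based on the input prompt."""
    with torch.no_grad():
        image = pipeline(prompt).images[0]  # first image of the batch
    output_path = "generated_image.png"     # assumed filename
    image.save(output_path)
    return output_path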
translate.py CHANGED
@@ -25,6 +25,7 @@
 # inputs = tokenizer(text, return_tensors="pt", padding=True)
 # translated = translation_model.generate(**inputs)
 # return tokenizer.decode(translated[0], skip_special_tokens=True)
+import spaces
 import torch
 from transformers import MarianMTModel, MarianTokenizer
 
@@ -64,7 +65,7 @@ SUPPORTED_LANGUAGES = {
     "Indonesian": "id",
     "Malay": "ms"
 }
-
+@spaces.GPU
 def translate_text(text, src_lang, tgt_lang="en"):
     """Translate text from the selected source language to English."""
     if src_lang not in SUPPORTED_LANGUAGES:
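The same pattern is applied here: import spaces and decorate the GPU-bound entry point. A minimal sketch of the decorated function follows; only the import, the decorator, the SUPPORTED_LANGUAGES check, and the commented-out tokenize/generate/decode lines are grounded in the diff, while the Helsinki-NLP/opus-mt model naming, the per-call model loading, and the error handling are assumptions.

# Hypothetical sketch of translate_text after this commit; model naming,
# error handling, and per-call model loading are assumptions.
import spaces
import torch
from transformers import MarianMTModel, MarianTokenizer

SUPPORTED_LANGUAGES = {
    # ... other entries elided; the diff shows only the last two
    "Indonesian": "id",
    "Malay": "ms",
}

@spaces.GPU  # on ZeroGPU Spaces, a GPU is attached only while this call runs
def translate_text(text, src_lang, tgt_lang="en"):
    """Translate text from the selected source language to English."""
    if src_lang not in SUPPORTED_LANGUAGES:
        raise ValueError(f"Unsupported source language: {src_lang}")
    src = SUPPORTED_LANGUAGES[src_lang]
    # MarianMT checkpoints follow the opus-mt-<src>-<tgt> naming scheme
    model_name = f"Helsinki-NLP/opus-mt-{src}-{tgt_lang}"
    tokenizer = MarianTokenizer.from_pretrained(model_name)
    model = MarianMTModel.from_pretrained(model_name)
    inputs = tokenizer(text, return_tensors="pt", padding=True)
    with torch.no_grad():
        translated = model.generate(**inputs)
    return tokenizer.decode(translated[0], skip_special_tokens=True)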