SandaAbhishekSagar committed
Commit 687bf24 · 1 Parent(s): 50936f2

revamped code

Files changed (2)
  1. image_generator.py +11 -24
  2. translate.py +4 -3
image_generator.py CHANGED
@@ -31,34 +31,21 @@
# print("Generated Image Path:", generate_image(prompt))


- # from diffusers import StableDiffusionPipeline
- # import torch
-
- # # Preload the model globally
- # device = "cuda" if torch.cuda.is_available() else "cpu"
- # model = StableDiffusionPipeline.from_pretrained(
- #     "stabilityai/stable-diffusion-2-1-base",
- #     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
- # )
- # model.to(device)
-
- # def generate_image(prompt):
- #     """Generate an image from a text prompt."""
- #     image = model(prompt).images[0]
- #     output_path = "output.png"
- #     image.save(output_path)
- #     return output_path
-
from diffusers import StableDiffusionPipeline
import torch

- # Load the Stable Diffusion pipeline on the CPU
- pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32)
- pipe.to("cpu")  # Force the pipeline to run on the CPU
+ # Preload the model globally
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model = StableDiffusionPipeline.from_pretrained(
+     "runwayml/stable-diffusion-v1-5"
+ )
+ pipeline = model.to(device)

def generate_image(prompt):
-     """Generate an image from a text prompt."""
-     image = pipe(prompt).images[0]
-     image_path = "output_image.png"
+     """Generate an image based on the input prompt."""
+     with torch.no_grad():
+         image = pipeline(prompt).images[0]
+     # Save the image locally and return the file path
+     image_path = "generated_image.png"
    image.save(image_path)
    return image_path
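
For context: this revamp preloads the Stable Diffusion pipeline once at import time and moves it to the GPU when one is available, instead of forcing CPU execution. A minimal usage sketch of the revamped function (the prompt string is a hypothetical example, not part of the commit):

from image_generator import generate_image

path = generate_image("a watercolor painting of a lighthouse at dusk")
print("Generated Image Path:", path)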
translate.py CHANGED
@@ -25,13 +25,14 @@
# inputs = tokenizer(text, return_tensors="pt", padding=True)
# translated = translation_model.generate(**inputs)
# return tokenizer.decode(translated[0], skip_special_tokens=True)
-
+ import torch
from transformers import MarianMTModel, MarianTokenizer

# Preload the translation model globally
+ device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "Helsinki-NLP/opus-mt-mul-en"
tokenizer = MarianTokenizer.from_pretrained(model_name)
- translation_model = MarianMTModel.from_pretrained(model_name)
+ translation_model = MarianMTModel.from_pretrained(model_name).to(device)

# Define supported languages for the dropdown
SUPPORTED_LANGUAGES = {
@@ -75,7 +76,7 @@ def translate_text(text, src_lang, tgt_lang="en"):
    translation_model = MarianMTModel.from_pretrained(model_name)

    # Tokenize the input text
-     inputs = tokenizer(text, return_tensors="pt", padding=True)
+     inputs = tokenizer(text, return_tensors="pt", padding=True).to(device)

    # Perform translation
    translated = translation_model.generate(**inputs)
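
Likewise, translate.py now picks the device once and moves the preloaded model and the tokenized inputs onto it. A minimal calling sketch (the Spanish sample text and the "es" code are hypothetical; valid codes are whatever SUPPORTED_LANGUAGES defines). Note that translate_text re-loads translation_model locally without .to(device), so the sketch assumes CPU execution:

from translate import translate_text

print(translate_text("Hola, ¿cómo estás?", src_lang="es"))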