Upload folder using huggingface_hub

Files changed:
- README.md +1 -1
- app.py +1 -0
- modules/Attention/AttentionMethods.py +2 -2
README.md CHANGED

@@ -8,6 +8,7 @@ sdk_version: 5.14.0
 
 # Say hi to LightDiffusion-Next 👋
 
+[](https://huggingface.co/spaces/Aatricks/LightDiffusion-Next)
 
 **LightDiffusion-Next** is the fastest AI-powered image generation GUI/CLI, combining speed, precision, and flexibility in one cohesive tool.
 </br>
@@ -19,7 +20,6 @@ sdk_version: 5.14.0
 </br>
 </div>
 
-
 As a refactored and improved version of the original [LightDiffusion repository](https://github.com/Aatrick/LightDiffusion), this project enhances usability, maintainability, and functionality while introducing a host of new features to streamline your creative workflows.
 
 ## Motivation:
|
app.py CHANGED

@@ -91,6 +91,7 @@ def generate_images(
 with gr.Blocks(title="LightDiffusion Web UI") as demo:
     gr.Markdown("# LightDiffusion Web UI")
     gr.Markdown("Generate AI images using LightDiffusion")
+    gr.Markdown("")
 
     with gr.Row():
         with gr.Column():
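For orientation, here is a minimal, hypothetical sketch of how a `gr.Blocks` layout like this is typically wired, assuming a `generate_images()` callback like the one named in the hunk header; only the three `gr.Markdown` lines and the `Row`/`Column` nesting come from the diff, while the `prompt_box`, `run`, and `gallery` controls and the `launch()` call are illustrative.

```python
# Hypothetical sketch of the surrounding layout; only the Markdown lines and the
# Row/Column nesting appear in the diff above, the rest is illustrative.
import gradio as gr


def generate_images(prompt: str):
    # Placeholder callback; the real app calls into the LightDiffusion pipeline.
    return []


with gr.Blocks(title="LightDiffusion Web UI") as demo:
    gr.Markdown("# LightDiffusion Web UI")
    gr.Markdown("Generate AI images using LightDiffusion")
    gr.Markdown("")  # the line added in this commit

    with gr.Row():
        with gr.Column():
            prompt_box = gr.Textbox(label="Prompt")
            run = gr.Button("Generate")
        with gr.Column():
            gallery = gr.Gallery(label="Results")

    run.click(fn=generate_images, inputs=prompt_box, outputs=gallery)

if __name__ == "__main__":
    demo.launch()
```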
modules/Attention/AttentionMethods.py CHANGED

@@ -6,7 +6,7 @@ import torch
 
 
 def attention_xformers(
-    q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, heads: int, mask=None
+    q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, heads: int, mask=None, skip_reshape=False
 ) -> torch.Tensor:
     """#### Make an attention call using xformers. Fastest attention implementation.
 
@@ -44,7 +44,7 @@ def attention_xformers(
 
 
 def attention_pytorch(
-    q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, heads: int, mask=None
+    q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, heads: int, mask=None, skip_reshape=False
 ) -> torch.Tensor:
     """#### Make an attention call using PyTorch.
 
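Both signatures gain a `skip_reshape` parameter. As a rough illustration of what such a flag usually controls in attention helpers of this kind, here is a minimal sketch built on `torch.nn.functional.scaled_dot_product_attention`, assuming inputs arrive as `(batch, seq_len, heads * dim_head)` by default and are already split into `(batch, heads, seq_len, dim_head)` when `skip_reshape=True`; the function name and body are illustrative, not the repository's implementation.

```python
# Hypothetical sketch, not the repository's code: what a skip_reshape flag
# typically means in an SDPA-based attention helper.
import torch
from torch.nn import functional as F


def attention_pytorch_sketch(
    q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, heads: int, mask=None, skip_reshape=False
) -> torch.Tensor:
    if skip_reshape:
        # Caller already provides (batch, heads, seq_len, dim_head); use as-is.
        b, _, _, dim_head = q.shape
    else:
        # Split (batch, seq_len, heads * dim_head) into per-head layout.
        b, _, inner_dim = q.shape
        dim_head = inner_dim // heads
        q, k, v = (t.view(b, -1, heads, dim_head).transpose(1, 2) for t in (q, k, v))

    out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
    # Merge heads back into (batch, seq_len, heads * dim_head).
    return out.transpose(1, 2).reshape(b, -1, heads * dim_head)
```

When the caller has already laid q, k, and v out per head, skipping the reshape avoids a redundant view/transpose round trip, which is presumably why the flag was threaded through both attention variants.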