# ZeroGPU demo: run on CPU by default and hop onto a serverless GPU on demand.
import spaces

import datetime
import os
import subprocess

import torch

import gradio as gr

CUSTOM_CSS = """
#output_box textarea {
    font-family: IBM Plex Mono, ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
}
"""

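# A CUDA tensor created at import time. With ZeroGPU no GPU is attached yet, so
# this print typically reports "cpu"; inside the @spaces.GPU function below the
# same tensor reports "cuda:0".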
zero = torch.Tensor([0]).cuda()
print(zero.device)

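# @spaces.GPU requests a ZeroGPU device for the duration of the call: the
# function body runs with the GPU attached, and it is released on return.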
@spaces.GPU
def run_gpu() -> str:
    print(zero.device)
    output: str = ""
    try:
        # Capture the nvidia-smi report from the attached GPU.
        output = subprocess.check_output(["nvidia-smi"], text=True)
    except FileNotFoundError:
        output = "nvidia-smi failed"
    # Human-readable timestamp, e.g. "2024-01-01 12:00:00".
    comment = (
        datetime.datetime.now().replace(microsecond=0).isoformat().replace("T", " ")
    )
    return f"# {comment}\n\n{output}"

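# run() is the checkbox handler: when the box is ticked it dispatches to the
# GPU-decorated function above, otherwise it stays on CPU.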
def run(check: bool) -> str:
    if check:
        return run_gpu()
    else:
        comment = (
            datetime.datetime.now().replace(microsecond=0).isoformat().replace("T", " ")
        )
        return f"# {comment}\n\nThis is running on CPU\n\nClick on 'Run on GPU' below to move to GPU instantly and run nvidia-smi"

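# The output textbox is created before the Blocks context so it can be
# pre-filled with the CPU message (value=run(False)); it is placed in the
# layout later via output.render().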
output = gr.Textbox(
    label="Command Output", max_lines=32, elem_id="output_box", value=run(False)
)

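# UI layout: a title, the pre-rendered output box, and a checkbox wired to
# run(); every=1 asks Gradio to re-run the handler every second so the
# timestamp and nvidia-smi output stay fresh.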
with gr.Blocks(css=CUSTOM_CSS) as demo:
    gr.Markdown("#### `zero-gpu`: how to run on serverless GPU for free on Spaces 🔥")
    output.render()
    check = gr.Checkbox(label="Run on GPU")
    check.change(run, inputs=[check], outputs=output, every=1)

demo.queue().launch(show_api=False)