desert committed
Commit · f84cd21
1 Parent(s): fa564e5
del
app.py CHANGED
@@ -15,10 +15,6 @@ model, tokenizer = FastLanguageModel.from_pretrained(
     load_in_4bit=load_in_4bit,
 )
 
-# Move model to GPU if available
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model = model.to(device)
-
 
 # Respond function
 def respond(
@@ -48,7 +44,7 @@ def respond(
         tokenize=True,
         add_generation_prompt=True,
         return_tensors="pt",
-    )
+    )
 
     # Generate the response using your model
     outputs = model.generate(
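For context, this commit drops the explicit model.to(device) step after FastLanguageModel.from_pretrained: a 4-bit (bitsandbytes-quantized) model is already placed on the GPU at load time, and calling .to() on such a model typically raises an error in transformers. The sketch below is a minimal reconstruction of the surrounding app.py flow after this change, assuming the Unsloth FastLanguageModel API and a chat-template-based respond(); the checkpoint name, max_seq_length, generation arguments, and the respond() signature are placeholders, not the repository's actual values.

# Minimal sketch (not the repository's exact code) of app.py after this commit.
from unsloth import FastLanguageModel

# Hypothetical loading arguments; the real app.py defines its own values.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/llama-3-8b-Instruct-bnb-4bit",  # placeholder checkpoint
    max_seq_length=2048,
    load_in_4bit=True,
)
# No model.to(device) here: the 4-bit weights are already dispatched to the GPU
# during loading, and .to() is not supported for bitsandbytes-quantized models.

# Respond function
def respond(message, history=None):
    # Build a chat prompt; the real respond() likely also threads the history
    # and a system message through here.
    messages = [{"role": "user", "content": message}]
    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)  # move the inputs to wherever the model lives

    # Generate the response using the loaded model
    outputs = model.generate(inputs, max_new_tokens=256)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)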