Update README.md
README.md CHANGED
@@ -134,7 +134,7 @@ const messages = [
 const output = await generator(messages, {
   max_new_tokens: 512,
   do_sample: false,
-  streamer: new TextStreamer(generator.tokenizer, { skip_prompt: true, skip_special_tokens: true}),
+  streamer: new TextStreamer(generator.tokenizer, { skip_prompt: true, skip_special_tokens: true }),
 });
 console.log(output[0].generated_text.at(-1).content);
 // The capital of France is Paris.
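For context: `generator` is the text-generation pipeline created earlier in the README, above the lines this hunk shows. A minimal sketch of that assumed surrounding setup (the `pipeline` call and the `messages` contents are inferred, not visible in this diff):

import { pipeline, TextStreamer } from "@huggingface/transformers";

// Assumed setup: create a text-generation pipeline (model id taken from the second hunk)
const generator = await pipeline("text-generation", "onnx-community/LFM2-1.2B-ONNX");

// The hunk header shows a `messages` array defined just above; its content is assumed here
const messages = [
  { role: "user", content: "What is the capital of France?" },
];

const output = await generator(messages, {
  max_new_tokens: 512,
  do_sample: false,
  // Stream tokens as they arrive, hiding the prompt and special tokens
  streamer: new TextStreamer(generator.tokenizer, { skip_prompt: true, skip_special_tokens: true }),
});
console.log(output[0].generated_text.at(-1).content);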
@@ -150,9 +150,10 @@ import { AutoModelForCausalLM, AutoTokenizer, TextStreamer } from "@huggingface/
 const model_id = "onnx-community/LFM2-1.2B-ONNX";
 const tokenizer = await AutoTokenizer.from_pretrained(model_id);
 const model = await AutoModelForCausalLM.from_pretrained(
-  model_id, { dtype: "
+  model_id, { dtype: "q4", device: "webgpu" },
 );
 
+// Define tools and messages
 const tools = [
   {
     name: "get_weather",
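The tool definition is cut off after `name: "get_weather",` in this diff. A plausible completion, following the JSON-schema convention Hugging Face chat templates use for tools; every field below other than `name` is illustrative, not recovered from the README:

const tools = [
  {
    name: "get_weather",
    // Hypothetical description/parameters: the real ones are truncated out of this diff
    description: "Get the current weather for a given city.",
    parameters: {
      type: "object",
      properties: {
        city: { type: "string", description: "Name of the city" },
      },
      required: ["city"],
    },
  },
];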
@@ -181,25 +182,19 @@ const messages = [
   },
 ];
 
-//
+// Prepare inputs
 const input = tokenizer.apply_chat_template(messages, {
   tools,
   add_generation_prompt: true,
   return_dict: true,
 });
 
-// Set up the streamer
-const streamer = new TextStreamer(tokenizer, {
-  skip_prompt: true,
-  skip_special_tokens: false,
-});
-
 // Generate output
 const sequences = await model.generate({
   ...input,
   max_new_tokens: 512,
   do_sample: false,
-  streamer,
+  streamer: new TextStreamer(tokenizer, { skip_prompt: true, skip_special_tokens: false }),
 });
 
 // Decode and print the generated text
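The hunk ends at the `// Decode and print the generated text` comment, just before the decoding code itself. A sketch of how Transformers.js model cards typically finish this step, assuming the Tensor `slice` API to strip the prompt tokens (this exact code is not shown in the diff):

// Keep only the newly generated tokens, i.e. everything after the prompt
const response = tokenizer.batch_decode(
  sequences.slice(null, [input.input_ids.dims.at(-1), null]),
  { skip_special_tokens: false },
);
console.log(response[0]);

Keeping skip_special_tokens: false mirrors the streamer settings above, so any special tool-call tokens the model emits remain visible in the decoded output.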