xu-song commited on
Commit
ed8b0c6
·
1 Parent(s): 982d6e8
docs/chat-template/DeepSeek-R1/json_output.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+
3
+ """
4
+
5
+
6
+ import json
7
+ from openai import OpenAI
8
+
9
+ client = OpenAI(
10
+ api_key="<your api key>",
11
+ base_url="https://api.deepseek.com",
12
+ )
13
+
14
+ system_prompt = """
15
+ The user will provide some exam text. Please parse the "question" and "answer" and output them in JSON format.
16
+
17
+ EXAMPLE INPUT:
18
+ Which is the highest mountain in the world? Mount Everest.
19
+
20
+ EXAMPLE JSON OUTPUT:
21
+ {
22
+ "question": "Which is the highest mountain in the world?",
23
+ "answer": "Mount Everest"
24
+ }
25
+ """
26
+
27
+ user_prompt = "Which is the longest river in the world? The Nile River."
28
+
29
+ messages = [{"role": "system", "content": system_prompt},
30
+ {"role": "user", "content": user_prompt}]
31
+
32
+ response = client.chat.completions.create(
33
+ model="deepseek-chat",
34
+ messages=messages,
35
+ response_format={
36
+ 'type': 'json_object'
37
+ }
38
+ )
39
+
40
+ print(json.loads(response.choices[0].message.content))
docs/chat-template/DeepSeek-R1/tool_demo.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ https://github.com/deepseek-ai/DeepSeek-R1/issues/9
3
+
4
+
5
+ ## reference
6
+ - https://api-docs.deepseek.com/guides/function_calling
7
+ - s
8
+
9
+ """
10
+
11
+
12
+
13
+
14
+ from openai import OpenAI
15
+
16
+ def send_messages(messages):
17
+ response = client.chat.completions.create(
18
+ model="deepseek-chat",
19
+ messages=messages,
20
+ tools=tools
21
+ )
22
+ return response.choices[0].message
23
+
24
+ client = OpenAI(
25
+ api_key="<your api key>",
26
+ base_url="https://api.deepseek.com",
27
+ )
28
+
29
+ tools = [
30
+ {
31
+ "type": "function",
32
+ "function": {
33
+ "name": "get_weather",
34
+ "description": "Get weather of a location, the user should supply a location first.",
35
+ "parameters": {
36
+ "type": "object",
37
+ "properties": {
38
+ "location": {
39
+ "type": "string",
40
+ "description": "The city and state, e.g. San Francisco, CA",
41
+ }
42
+ },
43
+ "required": ["location"]
44
+ },
45
+ }
46
+ },
47
+ ]
48
+
49
+ messages = [{"role": "user", "content": "How's the weather in Hangzhou?"}]
50
+ message = send_messages(messages)
51
+ print(f"User>\t {messages[0]['content']}")
52
+
53
+ tool = message.tool_calls[0]
54
+ messages.append(message)
55
+
56
+ messages.append({"role": "tool", "tool_call_id": tool.id, "content": "24℃"})
57
+ message = send_messages(messages)
58
+ print(f"Model>\t {message.content}")
59
+
docs/chat-template/DeepSeek-V3.1/enable_thinking.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Render DeepSeek-V3.1's chat template with thinking mode enabled (no model call)."""
from transformers import AutoTokenizer

MODEL_PATH = "deepseek-ai/DeepSeek-V3.1"


# Only the tokenizer is loaded — apply_chat_template needs no model weights.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)


# Minimal multi-turn conversation; the "2"/"3" contents are placeholders
# used only to exercise the template, not meaningful dialogue.
messages = [
    {"role": "system", "content": "You are a bot that responds to weather queries."},
    {"role": "user", "content": "Hey, what's the temperature in Paris right now?"},
    {"role": "assistant", "content": "2"},
    {"role": "user", "content": "3"},
]

prompt = tokenizer.apply_chat_template(
    conversation=messages,
    tokenize=False,  # return the rendered prompt string, not token ids
    thinking=True,  # DeepSeek-V3.1 template flag — enables the thinking prefix
    add_generation_prompt=True,
)

print(prompt)
docs/chat-template/Qwen3-235B-A22B-Instruct-2507/chat_template.jinja ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
{#- Qwen3-235B-A22B-Instruct-2507 chat template (ChatML-style <|im_start|>/<|im_end|> turns).
    Renders optional tool definitions into the system block, emits assistant
    tool calls as <tool_call> JSON, merges consecutive tool results into a
    single user turn, and keeps <think> reasoning only for turns after the
    last real user query. All tags use "-" whitespace trimming, so template
    indentation/comments never leak into the rendered prompt. -#}
{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0].role == 'system' %}
{{- messages[0].content + '\n\n' }}
{%- endif %}
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0].role == 'system' %}
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{#- Scan backwards for the last user message that is NOT a wrapped tool
    response; its index decides where reasoning content is kept below. -#}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
{%- set index = (messages|length - 1) - loop.index0 %}
{%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
{%- set ns.multi_step_tool = false %}
{%- set ns.last_query_index = index %}
{%- endif %}
{%- endfor %}
{#- Main rendering loop over the conversation. -#}
{%- for message in messages %}
{%- if message.content is string %}
{%- set content = message.content %}
{%- else %}
{%- set content = '' %}
{%- endif %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{#- Reasoning comes either from an explicit field or is split out of an
    inline <think>...</think> span in the content. -#}
{%- set reasoning_content = '' %}
{%- if message.reasoning_content is string %}
{%- set reasoning_content = message.reasoning_content %}
{%- else %}
{%- if '</think>' in content %}
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_query_index %}
{%- if loop.last or (not loop.last and reasoning_content) %}
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- if message.tool_calls %}
{%- for tool_call in message.tool_calls %}
{%- if (loop.first and content) or (not loop.first) %}
{{- '\n' }}
{%- endif %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments | tojson }}
{%- endif %}
{{- '}\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{#- Consecutive tool messages are wrapped into one user turn. -#}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %}
docs/chat-template/Qwen3-235B-A22B-Thinking-2507/chat_template.jinja ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
{#- Qwen3-235B-A22B-Thinking-2507 chat template (ChatML-style turns).
    Identical to the Instruct-2507 template except the generation prompt at
    the end opens a <think> block, since this model always reasons first.
    All tags use "-" whitespace trimming, so template comments/indentation
    never leak into the rendered prompt. -#}
{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0].role == 'system' %}
{{- messages[0].content + '\n\n' }}
{%- endif %}
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0].role == 'system' %}
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{#- Scan backwards for the last user message that is NOT a wrapped tool
    response; its index decides where reasoning content is kept below. -#}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
{%- set index = (messages|length - 1) - loop.index0 %}
{%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
{%- set ns.multi_step_tool = false %}
{%- set ns.last_query_index = index %}
{%- endif %}
{%- endfor %}
{#- Main rendering loop over the conversation. -#}
{%- for message in messages %}
{%- if message.content is string %}
{%- set content = message.content %}
{%- else %}
{%- set content = '' %}
{%- endif %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{#- Reasoning comes either from an explicit field or is split out of an
    inline <think>...</think> span in the content. -#}
{%- set reasoning_content = '' %}
{%- if message.reasoning_content is string %}
{%- set reasoning_content = message.reasoning_content %}
{%- else %}
{%- if '</think>' in content %}
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_query_index %}
{%- if loop.last or (not loop.last and reasoning_content) %}
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- if message.tool_calls %}
{%- for tool_call in message.tool_calls %}
{%- if (loop.first and content) or (not loop.first) %}
{{- '\n' }}
{%- endif %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments | tojson }}
{%- endif %}
{{- '}\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{#- Consecutive tool messages are wrapped into one user turn. -#}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{#- Thinking model: the generation prompt already opens the <think> block. -#}
{{- '<|im_start|>assistant\n<think>\n' }}
{%- endif %}
docs/chat-template/Qwen3-235B-A22B/chat_template.jinja ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
{#- Qwen3-235B-A22B chat template (ChatML-style turns).
    Same structure as the 2507 variants, plus an `enable_thinking` switch:
    when explicitly false, an empty <think></think> block is appended to the
    generation prompt to suppress reasoning. All tags use "-" whitespace
    trimming, so template comments/indentation never leak into the output. -#}
{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages[0].role == 'system' %}
{{- messages[0].content + '\n\n' }}
{%- endif %}
{{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages[0].role == 'system' %}
{{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{#- Scan backwards for the last user message that is NOT a wrapped tool
    response; its index decides where reasoning content is kept below. -#}
{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
{%- for message in messages[::-1] %}
{%- set index = (messages|length - 1) - loop.index0 %}
{%- if ns.multi_step_tool and message.role == "user" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
{%- set ns.multi_step_tool = false %}
{%- set ns.last_query_index = index %}
{%- endif %}
{%- endfor %}
{#- Main rendering loop over the conversation. -#}
{%- for message in messages %}
{%- if message.content is string %}
{%- set content = message.content %}
{%- else %}
{%- set content = '' %}
{%- endif %}
{%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
{{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{#- Reasoning comes either from an explicit field or is split out of an
    inline <think>...</think> span in the content. -#}
{%- set reasoning_content = '' %}
{%- if message.reasoning_content is string %}
{%- set reasoning_content = message.reasoning_content %}
{%- else %}
{%- if '</think>' in content %}
{%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
{%- set content = content.split('</think>')[-1].lstrip('\n') %}
{%- endif %}
{%- endif %}
{%- if loop.index0 > ns.last_query_index %}
{%- if loop.last or (not loop.last and reasoning_content) %}
{{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- else %}
{{- '<|im_start|>' + message.role + '\n' + content }}
{%- endif %}
{%- if message.tool_calls %}
{%- for tool_call in message.tool_calls %}
{%- if (loop.first and content) or (not loop.first) %}
{{- '\n' }}
{%- endif %}
{%- if tool_call.function %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{%- if tool_call.arguments is string %}
{{- tool_call.arguments }}
{%- else %}
{{- tool_call.arguments | tojson }}
{%- endif %}
{{- '}\n</tool_call>' }}
{%- endfor %}
{%- endif %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}
{#- Consecutive tool messages are wrapped into one user turn. -#}
{%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{#- enable_thinking=false pre-fills an empty think block (reasoning off);
    unset or true leaves the model free to open <think> itself. -#}
{%- if enable_thinking is defined and enable_thinking is false %}
{{- '<think>\n\n</think>\n\n' }}
{%- endif %}
{%- endif %}
docs/chat-template/Qwen3-235B-A22B/enable_thinking.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Render Qwen3-235B-A22B's chat template with thinking disabled (no model call)."""
from transformers import AutoTokenizer


MODEL_PATH = "Qwen/Qwen3-235B-A22B"


# Only the tokenizer is loaded — apply_chat_template needs no model weights.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)


# Minimal multi-turn conversation; the "2"/"3" contents are placeholders
# used only to exercise the template, not meaningful dialogue.
messages = [
    {"role": "system", "content": "You are a bot that responds to weather queries."},
    {"role": "user", "content": "Hey, what's the temperature in Paris right now?"},
    {"role": "assistant", "content": "2"},
    {"role": "user", "content": "3"},
]

prompt = tokenizer.apply_chat_template(
    conversation=messages,
    tokenize=False,  # return the rendered prompt string, not token ids
    enable_thinking=False,  # enable_thinking defaults to True
    add_generation_prompt=True,
)

print(prompt)
docs/chat-template/export_chat_template.py CHANGED
@@ -9,11 +9,13 @@ from transformers import AutoTokenizer
9
  # MODEL_PATH = "../../test/Llama-4-Maverick-17B-128E-Instruct"
10
  # MODEL_PATH = "meta-llama/Llama-4-Maverick-17B-128E-Instruct"
11
  # MODEL_PATH = "Qwen/Qwen3-235B-A22B-Instruct-2507"
 
 
12
  # MODEL_PATH = "mistralai/Mistral-7B-Instruct-v0.1" # messages里不支持tool_calls,不支持 role=tool,不支持 tools
13
  # MODEL_PATH = "mistralai/Ministral-8B-Instruct-2410" # 支持 tools, 支持tool_calls(必须要有id), 格式非主流
14
  # MODEL_PATH = "deepseek-ai/DeepSeek-R1"
15
  # MODEL_PATH = "deepseek-ai/DeepSeek-R1-0528"
16
- MODEL_PATH = 'deepseek-ai/DeepSeek-V3.1'
17
  # MODEL_PATH = "google/gemma-3-27b-it"
18
 
19
 
 
9
  # MODEL_PATH = "../../test/Llama-4-Maverick-17B-128E-Instruct"
10
  # MODEL_PATH = "meta-llama/Llama-4-Maverick-17B-128E-Instruct"
11
  # MODEL_PATH = "Qwen/Qwen3-235B-A22B-Instruct-2507"
12
+ # MODEL_PATH = "Qwen/Qwen3-235B-A22B-Thinking-2507"
13
+ MODEL_PATH = "Qwen/Qwen3-235B-A22B"
14
  # MODEL_PATH = "mistralai/Mistral-7B-Instruct-v0.1" # messages里不支持tool_calls,不支持 role=tool,不支持 tools
15
  # MODEL_PATH = "mistralai/Ministral-8B-Instruct-2410" # 支持 tools, 支持tool_calls(必须要有id), 格式非主流
16
  # MODEL_PATH = "deepseek-ai/DeepSeek-R1"
17
  # MODEL_PATH = "deepseek-ai/DeepSeek-R1-0528"
18
+ # MODEL_PATH = 'deepseek-ai/DeepSeek-V3.1'
19
  # MODEL_PATH = "google/gemma-3-27b-it"
20
 
21
 
docs/chat-template/{tool_call_and_tool_response.md → tool_call_and_tool_result.md} RENAMED
@@ -1,9 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
1
 
2
  ## 简介
3
 
4
 
5
- - `tool_calls`:
6
- - `tool_response`:
7
 
8
 
9
 
 
1
+ ---
2
+ title: 工具调用(tool_call) 与 工具结果 tool_response
3
+ ---
4
+
5
+ - [简介](#简介)
6
+ - [messages](#messages)
7
+ - [llama3.1-405b](#llama31-405b)
8
+ - [Hermes-3-Llama-3.1-405B](#hermes-3-llama-31-405b)
9
+ - [Qwen3](#qwen3)
10
+ - [参考](#参考)
11
+
12
 
13
  ## 简介
14
 
15
 
16
+ 1. `tool_calls`: 调用工具的入参
17
+ 2. `tool_response`: 调用工具的结果,也叫 `tool_result`
18
 
19
 
20
 
docs/chat-template/tool_demo.py CHANGED
@@ -17,15 +17,18 @@ from transformers.utils import get_json_schema
17
 
18
 
19
  # MODEL_PATH = "meta-llama/Llama-3.1-405B-Instruct"
20
- MODEL_PATH = "NousResearch/Hermes-3-Llama-3.1-405B" # messages里的tool_calls必须要有content字段
21
  # MODEL_PATH = "../../test/Llama-4-Maverick-17B-128E-Instruct/"
22
  # MODEL_PATH = "meta-llama/Llama-4-Maverick-17B-128E-Instruct"
23
  # MODEL_PATH = "Qwen/Qwen3-235B-A22B-Instruct-2507"
24
  # MODEL_PATH = "mistralai/Mistral-7B-Instruct-v0.1" # messages里不支持tool_calls,不支持 role=tool,不支持 tools
25
  # MODEL_PATH = "mistralai/Ministral-8B-Instruct-2410" # 支持 tools, 支持tool_calls(必须要有id), 格式非主流
26
  # MODEL_PATH = "deepseek-ai/DeepSeek-R1" # 不支持tools,tool_calls也有问题
 
27
  # MODEL_PATH = "google/gemma-3-27b-it" # 不支持任何tool
28
  # MODEL_PATH = "moonshotai/Kimi-K2-Instruct"
 
 
29
  tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
30
 
31
  # First, define a tool
 
17
 
18
 
19
  # MODEL_PATH = "meta-llama/Llama-3.1-405B-Instruct"
20
+ # MODEL_PATH = "NousResearch/Hermes-3-Llama-3.1-405B" # messages里的tool_calls必须要有content字段
21
  # MODEL_PATH = "../../test/Llama-4-Maverick-17B-128E-Instruct/"
22
  # MODEL_PATH = "meta-llama/Llama-4-Maverick-17B-128E-Instruct"
23
  # MODEL_PATH = "Qwen/Qwen3-235B-A22B-Instruct-2507"
24
  # MODEL_PATH = "mistralai/Mistral-7B-Instruct-v0.1" # messages里不支持tool_calls,不支持 role=tool,不支持 tools
25
  # MODEL_PATH = "mistralai/Ministral-8B-Instruct-2410" # 支持 tools, 支持tool_calls(必须要有id), 格式非主流
26
  # MODEL_PATH = "deepseek-ai/DeepSeek-R1" # 不支持tools,tool_calls也有问题
27
+ # MODEL_PATH = "deepseek-ai/DeepSeek-V3.1"
28
  # MODEL_PATH = "google/gemma-3-27b-it" # 不支持任何tool
29
  # MODEL_PATH = "moonshotai/Kimi-K2-Instruct"
30
+ MODEL_PATH = "xai-org/grok-2"
31
+
32
  tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
33
 
34
  # First, define a tool
docs/chat-template/tools_and_llm_response.md CHANGED
@@ -3,7 +3,8 @@ title: 支持的工具列表 & llm返回的工具调用
3
  ---
4
 
5
 
6
- - [LLM入参 = 工具列表](#llm入参--工具列表)
 
7
  - [LLM出参 = 工具名 + 参数](#llm出参--工具名--参数)
8
  - [LLM入参示例:messages and tools](#llm入参示例messages-and-tools)
9
  - [LLM入参示例:转化为 prompt 字符串](#llm入参示例转化为-prompt-字符串)
@@ -12,12 +13,30 @@ title: 支持的工具列表 & llm返回的工具调用
12
  - [llama4 ⭐️⭐️⭐️](#llama4-️️️)
13
  - [mistralai/Ministral-8B-Instruct-2410](#mistralaiministral-8b-instruct-2410)
14
  - [qwen3 ⭐️⭐️⭐️⭐️⭐️](#qwen3-️️️️️)
15
- - [deepseek-r1](#deepseek-r1)
16
- - [deepseek-v3.1](#deepseek-v31)
 
 
17
 
 
18
 
19
 
20
- ## LLM入参 = 工具列表
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  一般都是json格式,也有非json格式
23
 
@@ -224,6 +243,7 @@ Hey, what's the temperature in Paris right now?<|eot|><|header_start|>assistant<
224
 
225
  跟 llama3.1差不多,只是少了`date`,并且换了`special_token`。(同样拼在第一个user轮)
226
 
 
227
  - **推荐指数**: ⭐️⭐️⭐️
228
 
229
 
@@ -273,7 +293,27 @@ Hey, what's the temperature in Paris right now?<|im_end|>
273
  - **推荐指数**: ⭐️⭐️⭐️⭐️⭐️
274
 
275
 
276
- ### deepseek-r1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
277
 
278
- ### deepseek-v3.1
279
 
 
 
3
  ---
4
 
5
 
6
+ - [tools调用](#tools调用)
7
+ - [LLM入参 = messages + tools](#llm入参--messages--tools)
8
  - [LLM出参 = 工具名 + 参数](#llm出参--工具名--参数)
9
  - [LLM入参示例:messages and tools](#llm入参示例messages-and-tools)
10
  - [LLM入参示例:转化为 prompt 字符串](#llm入参示例转化为-prompt-字符串)
 
13
  - [llama4 ⭐️⭐️⭐️](#llama4-️️️)
14
  - [mistralai/Ministral-8B-Instruct-2410](#mistralaiministral-8b-instruct-2410)
15
  - [qwen3 ⭐️⭐️⭐️⭐️⭐️](#qwen3-️️️️️)
16
+ - [deepseek-r1 ⭐️⭐️](#deepseek-r1-️️)
17
+ - [deepseek-v3.1 ⭐️⭐️](#deepseek-v31-️️)
18
+ - [gemma-3-27b-it ⭐️](#gemma-3-27b-it-️)
19
+ - [grok-2 ⭐️](#grok-2-️)
20
 
21
+ ## tools调用
22
 
23
 
24
+ ```py
25
+ # openai chat api
26
+ completion = client.chat.completions.create(
27
+ model=model_name,
28
+ messages=messages,
29
+ tools=tools, # tool list defined above
30
+ tool_choice="auto"
31
+ )
32
+ # openai responses api
33
+
34
+ ```
35
+
36
+
37
+ ## LLM入参 = messages + tools
38
+
39
+ tools是支持的工具列表
40
 
41
  一般都是json格式,也有非json格式
42
 
 
243
 
244
  跟 llama3.1差不多,只是少了`date`,并且换了`special_token`。(同样拼在第一个user轮)
245
 
246
+ - **评价**: 同 llama3.1
247
  - **推荐指数**: ⭐️⭐️⭐️
248
 
249
 
 
293
  - **推荐指数**: ⭐️⭐️⭐️⭐️⭐️
294
 
295
 
296
+ ### deepseek-r1 ⭐️⭐️
297
+
298
+
299
+ ```sh
300
+ <|begin▁of▁sentence|>You are a bot that responds to weather queries.<|User|>Hey, what's the temperature in Paris right now?<|Assistant|><think>
301
+ ```
302
+
303
+ - **评价**:
304
+ - 扣分项: 不支持tools,tool_calls也有问题
305
+ - **推荐指数**: ⭐️⭐️
306
+
307
+
308
+ ### deepseek-v3.1 ⭐️⭐️
309
+
310
+ 与 deepseek-r1 一样
311
+
312
+
313
+ ### gemma-3-27b-it ⭐️
314
+
315
+ 不支持任何tool
316
 
317
+ ### grok-2 ⭐️
318
 
319
+ 不支持任何tool