Add pipeline tag and library name to model card

#1
by nielsr (HF Staff) · opened
Files changed (1)
  1. README.md +8 -4
README.md CHANGED
@@ -1,10 +1,13 @@
 ---
-license: apache-2.0
-language:
-- en
 base_model:
 - Qwen/Qwen3-8B
+language:
+- en
+license: apache-2.0
+pipeline_tag: text-generation
+library_name: transformers
 ---
+
 <div align="center">
 
 # 🧩 ReForm: Reflective Autoformalization with Prospective Bounded Sequence Optimization
@@ -50,7 +53,8 @@ model_name = "GuoxinChen/ReForm-8B" # or "GuoxinChen/ReForm-32B"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
 
-prompt = "Think step by step to translate the mathematical problem in natural language to Lean 4, and verify the consistency.\nLet $a_1, a_2,\\cdots, a_n$ be real constants, $x$ a real variable, and $f(x)=\\cos(a_1+x)+\\frac{1}{2}\\cos(a_2+x)+\\frac{1}{4}\\cos(a_3+x)+\\cdots+\\frac{1}{2^{n-1}}\\cos(a_n+x).$ Given that $f(x_1)=f(x_2)=0,$ prove that $x_2-x_1=m\\pi$ for some integer $m.$"
+prompt = "Think step by step to translate the mathematical problem in natural language to Lean 4, and verify the consistency.
+Let $a_1, a_2,\\cdots, a_n$ be real constants, $x$ a real variable, and $f(x)=\\cos(a_1+x)+\\frac{1}{2}\\cos(a_2+x)+\\frac{1}{4}\\cos(a_3+x)+\\cdots+\\frac{1}{2^{n-1}}\\cos(a_n+x).$ Given that $f(x_1)=f(x_2)=0,$ prove that $x_2-x_1=m\\pi$ for some integer $m.$"
 
 inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
 outputs = model.generate(**inputs, max_new_tokens=512)
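
With `pipeline_tag: text-generation` and `library_name: transformers` in the card metadata, the Hub can file the model under the right task filter and generate the correct loading snippet. A minimal sketch of what the tag enables, assuming only the checkpoint name from the README (`GuoxinChen/ReForm-8B`); the problem statement below is a placeholder, and the generation settings mirror the README's `max_new_tokens=512`:

```python
from transformers import pipeline

# The task string matches the card's new pipeline_tag; torch_dtype/device_map
# mirror the from_pretrained call in the README snippet.
pipe = pipeline(
    "text-generation",
    model="GuoxinChen/ReForm-8B",  # or "GuoxinChen/ReForm-32B"
    torch_dtype="auto",
    device_map="auto",
)

# Placeholder problem statement; substitute a real one as in the README prompt.
result = pipe(
    "Think step by step to translate the mathematical problem in natural "
    "language to Lean 4, and verify the consistency.\n<problem statement>",
    max_new_tokens=512,
)
print(result[0]["generated_text"])
```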
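
One nit on the README change itself: as committed, the new `prompt` string literal spans two physical lines, which is not valid Python (a plain string literal cannot contain a raw line break). A sketch of the same usage block with the literal repaired via implicit concatenation, plus the decode step the README snippet stops short of; the slicing assumes the usual decoder-only layout where `generate` returns prompt plus completion:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "GuoxinChen/ReForm-8B"  # or "GuoxinChen/ReForm-32B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")

# Adjacent string literals inside parentheses keep the long prompt readable
# without breaking the literal across physical lines.
prompt = (
    "Think step by step to translate the mathematical problem in natural "
    "language to Lean 4, and verify the consistency.\n"
    "Let $a_1, a_2,\\cdots, a_n$ be real constants, $x$ a real variable, and "
    "$f(x)=\\cos(a_1+x)+\\frac{1}{2}\\cos(a_2+x)+\\frac{1}{4}\\cos(a_3+x)"
    "+\\cdots+\\frac{1}{2^{n-1}}\\cos(a_n+x).$ Given that $f(x_1)=f(x_2)=0,$ "
    "prove that $x_2-x_1=m\\pi$ for some integer $m.$"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=512)

# Decode only the newly generated tokens (everything past the prompt).
completion = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(completion)
```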