Jingya (HF Staff) committed cb98597 (verified) · Parent(s): d4a7493

Upload config.json with huggingface_hub

Files changed (1): config.json (+8, −15)
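The commit message matches the default that huggingface_hub's HfApi.upload_file generates for a single-file upload. A minimal sketch of how such an upload is typically done (the repo_id below is a placeholder, since the target repository is not shown on this page):

from huggingface_hub import HfApi

api = HfApi()
# Upload one file to the Hub; the library's default commit message for
# this call is "Upload config.json with huggingface_hub".
api.upload_file(
    path_or_fileobj="config.json",
    path_in_repo="config.json",
    repo_id="org/repo-name",  # placeholder: the actual repo is not named here
)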
config.json CHANGED
@@ -1,6 +1,4 @@
 {
-  "_attn_implementation_autoset": true,
-  "_name_or_path": "/tmp/tmp55lzbkct",
   "architectures": [
     "BertModel"
   ],
@@ -26,35 +24,30 @@
     "auto_cast": "matmul",
     "auto_cast_type": "bf16",
     "compiler_type": "neuronx-cc",
-    "compiler_version": "2.16.372.0+4a9b2326",
+    "compiler_version": "2.21.18209.0+043b1bf7",
     "disable_fallback": false,
     "disable_fast_relayout": false,
     "dynamic_batch_size": false,
+    "float_dtype": "fp32",
     "inline_weights_to_neff": true,
-    "input_names": [
-      "input_ids",
-      "attention_mask"
-    ],
+    "instance_type": "trn1",
+    "int_dtype": "int64",
     "model_type": "transformer",
     "optlevel": "2",
     "output_attentions": false,
     "output_hidden_states": false,
-    "output_names": [
-      "token_embeddings",
-      "sentence_embedding"
-    ],
-    "static_batch_size": 1,
-    "static_sequence_length": 384,
+    "static_batch_size": 2,
+    "static_sequence_length": 512,
+    "task": "feature-extraction",
     "tensor_parallel_size": 1
   },
   "num_attention_heads": 16,
   "num_hidden_layers": 24,
   "pad_token_id": 0,
   "position_embedding_type": "absolute",
-  "task": "feature-extraction",
   "torch_dtype": "float32",
   "torchscript": true,
-  "transformers_version": "4.49.0",
+  "transformers_version": "4.55.4",
   "type_vocab_size": 2,
   "use_cache": true,
   "vocab_size": 30522