Training in progress, epoch 1
- .gitattributes +1 -0
- README.md +58 -0
- adapter_config.json +66 -0
- adapter_model.safetensors +3 -0
- chat_template.jinja +49 -0
- preprocessor_config.json +51 -0
- processor_config.json +5 -0
- runs/Jul11_13-38-35_shivneri/events.out.tfevents.1752221320.shivneri +3 -0
- runs/Jul11_13-55-03_shivneri/events.out.tfevents.1752222309.shivneri +3 -0
- runs/Jul11_14-05-26_shivneri/events.out.tfevents.1752222931.shivneri +3 -0
- runs/Jul14_10-13-15_shivneri/events.out.tfevents.1752468217.shivneri +3 -0
- runs/Jul14_10-24-15_shivneri/events.out.tfevents.1752468859.shivneri +3 -0
- runs/Jul14_10-30-22_shivneri/events.out.tfevents.1752469226.shivneri +3 -0
- special_tokens_map.json +36 -0
- tokenizer.json +3 -0
- tokenizer_config.json +0 -0
- training_args.bin +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,58 @@
+---
+base_model: google/gemma-3n-e2b-it
+library_name: transformers
+model_name: medgemma-4b-oraclebio_prompt
+tags:
+- generated_from_trainer
+- trl
+- sft
+licence: license
+---
+
+# Model Card for medgemma-4b-oraclebio_prompt
+
+This model is a fine-tuned version of [google/gemma-3n-e2b-it](https://huggingface.co/google/gemma-3n-e2b-it).
+It has been trained using [TRL](https://github.com/huggingface/trl).
+
+## Quick start
+
+```python
+from transformers import pipeline
+
+question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
+generator = pipeline("text-generation", model="kadcoder/medgemma-4b-oraclebio_prompt", device="cuda")
+output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
+print(output["generated_text"])
+```
+
+## Training procedure
+
+
+
+
+This model was trained with SFT.
+
+### Framework versions
+
+- TRL: 0.19.1
+- Transformers: 4.53.1
+- Pytorch: 2.4.1
+- Datasets: 4.0.0
+- Tokenizers: 0.21.2
+
+## Citations
+
+
+
+Cite TRL as:
+
+```bibtex
+@misc{vonwerra2022trl,
+    title = {{TRL: Transformer Reinforcement Learning}},
+    author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
+    year = 2020,
+    journal = {GitHub repository},
+    publisher = {GitHub},
+    howpublished = {\url{https://github.com/huggingface/trl}}
+}
+```
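The card records only that training used TRL's SFT with the framework versions listed; the training script itself is not part of this commit. As a minimal sketch of what such a run looks like in TRL 0.19, the dataset id and hyperparameters below are placeholders, not recovered from this repo:

```python
# Hypothetical sketch of a TRL SFT run; dataset id and hyperparameters
# are placeholders, not taken from this commit.
from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

dataset = load_dataset("trl-lib/Capybara", split="train")  # placeholder dataset

trainer = SFTTrainer(
    model="google/gemma-3n-e2b-it",  # base model named in the card
    train_dataset=dataset,
    args=SFTConfig(output_dir="medgemma-4b-oraclebio_prompt", num_train_epochs=1),
)
trainer.train()
```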
adapter_config.json
ADDED
@@ -0,0 +1,66 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "google/gemma-3n-e2b-it",
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_bias": false,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": [
+    "lm_head",
+    "embed_tokens"
+  ],
+  "peft_type": "LORA",
+  "qalora_group_size": 16,
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "prediction_coefs",
+    "altup_projections.1",
+    "modality_router",
+    "o_proj",
+    "ffw_layer_1",
+    "ffw_layer_2",
+    "input_proj_linear",
+    "v_proj",
+    "altup_unembed_projections.1",
+    "per_layer_input_gate",
+    "per_layer_model_projection",
+    "gate_proj",
+    "linear_end",
+    "altup_projections.0",
+    "altup_unembed_projections.2",
+    "post",
+    "embedding_projection",
+    "linear_left",
+    "altup_projections.2",
+    "pos_proj",
+    "linear_start",
+    "q_proj",
+    "down_proj",
+    "linear_right",
+    "up_proj",
+    "altup_unembed_projections.0",
+    "k_proj",
+    "per_layer_projection",
+    "correction_coefs"
+  ],
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}
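The config above describes a LoRA adapter (r=16, lora_alpha=16, dropout 0.05) over nearly all linear modules of the Gemma 3n base, with lm_head and embed_tokens saved in full. A minimal sketch of attaching it for inference with peft, assuming the repo id from the README; this loader is not part of the commit:

```python
# Sketch: loading the base model and attaching this LoRA adapter.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("google/gemma-3n-e2b-it", device_map="auto")
model = PeftModel.from_pretrained(base, "kadcoder/medgemma-4b-oraclebio_prompt")
tokenizer = AutoTokenizer.from_pretrained("kadcoder/medgemma-4b-oraclebio_prompt")

# Optionally fold the low-rank deltas into the base weights for faster inference.
model = model.merge_and_unload()
```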
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e05cf8cc5f852e0d145161f3e08bfaa431da694043919e5a7b2d78393a227fca
+size 2299383480
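What is stored in git here is a Git LFS pointer, not the weights: a version line, the blob's SHA-256 oid, and its byte size (about 2.3 GB for this adapter plus the fully saved modules). A small self-contained sketch of parsing such a pointer:

```python
# Sketch: splitting a Git LFS pointer file into its three fields.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"] == "https://git-lfs.github.com/spec/v1"
    return {"oid": fields["oid"], "size": int(fields["size"])}

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:e05cf8cc5f852e0d145161f3e08bfaa431da694043919e5a7b2d78393a227fca\n"
    "size 2299383480\n"
)
print(parse_lfs_pointer(pointer))  # size 2299383480 bytes, roughly 2.14 GiB
```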
chat_template.jinja
ADDED
@@ -0,0 +1,49 @@
+{{ bos_token }}
+{%- if messages[0]['role'] == 'system' -%}
+    {%- if messages[0]['content'] is string -%}
+        {%- set first_user_prefix = messages[0]['content'] + '
+
+' -%}
+    {%- else -%}
+        {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+' -%}
+    {%- endif -%}
+    {%- set loop_messages = messages[1:] -%}
+{%- else -%}
+    {%- set first_user_prefix = "" -%}
+    {%- set loop_messages = messages -%}
+{%- endif -%}
+{%- for message in loop_messages -%}
+    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+        {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+    {%- endif -%}
+    {%- if (message['role'] == 'assistant') -%}
+        {%- set role = "model" -%}
+    {%- else -%}
+        {%- set role = message['role'] -%}
+    {%- endif -%}
+    {{ '<start_of_turn>' + role + '
+' + (first_user_prefix if loop.first else "") }}
+    {%- if message['content'] is string -%}
+        {{ message['content'] | trim }}
+    {%- elif message['content'] is iterable -%}
+        {%- for item in message['content'] -%}
+            {%- if item['type'] == 'audio' -%}
+                {{ '<audio_soft_token>' }}
+            {%- elif item['type'] == 'image' -%}
+                {{ '<image_soft_token>' }}
+            {%- elif item['type'] == 'text' -%}
+                {{ item['text'] | trim }}
+            {%- endif -%}
+        {%- endfor -%}
+    {%- else -%}
+        {{ raise_exception("Invalid content type") }}
+    {%- endif -%}
+    {{ '<end_of_turn>
+' }}
+{%- endfor -%}
+{%- if add_generation_prompt -%}
+    {{'<start_of_turn>model
+'}}
+{%- endif -%}
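The template folds a system message into the first user turn, renames the assistant role to Gemma's model, wraps every turn in <start_of_turn>…<end_of_turn>, and replaces image/audio items with single soft tokens. A hedged sketch of rendering it through the tokenizer shipped in this commit (the message contents are placeholders):

```python
# Sketch: rendering a conversation with this chat template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("kadcoder/medgemma-4b-oraclebio_prompt")
messages = [
    {"role": "system", "content": "You are a concise clinical assistant."},
    {"role": "user", "content": "Summarize this patient note."},
]
# add_generation_prompt=True appends the trailing '<start_of_turn>model' block.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```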
preprocessor_config.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "crop_size": null,
+  "data_format": "channels_first",
+  "default_to_square": false,
+  "device": null,
+  "disable_grouping": null,
+  "dither": 0.0,
+  "do_center_crop": null,
+  "do_convert_rgb": null,
+  "do_normalize": false,
+  "do_rescale": true,
+  "do_resize": true,
+  "feature_size": 128,
+  "fft_length": 1024,
+  "fft_overdrive": true,
+  "frame_length": 512,
+  "hop_length": 160,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "SiglipImageProcessorFast",
+  "image_seq_length": 256,
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "input_data_format": null,
+  "input_scale_factor": 1.0,
+  "max_frequency": 7600.0,
+  "mel_floor": 1e-05,
+  "min_frequency": 125.0,
+  "padding_side": "right",
+  "padding_value": 0.0,
+  "per_bin_mean": null,
+  "per_bin_stddev": null,
+  "preemphasis": 0.97,
+  "preemphasis_htk_flavor": true,
+  "processor_class": "Gemma3nProcessor",
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "return_attention_mask": true,
+  "return_tensors": null,
+  "sampling_rate": 16000,
+  "size": {
+    "height": 768,
+    "width": 768
+  }
+}
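One file carries both modalities: the SigLIP image path (768×768, rescale with no extra normalization) and the audio front end (16 kHz log-mel with 128 bins). A quick sketch decoding a few of the constants above:

```python
# Sketch: sanity-checking some of the preprocessor constants.
rescale_factor = 0.00392156862745098
assert abs(rescale_factor - 1 / 255) < 1e-12   # pixels scaled from [0, 255] to [0, 1]

sampling_rate, hop_length, frame_length = 16000, 160, 512
print(1000 * hop_length / sampling_rate)    # 10.0 -> 10 ms hop between audio frames
print(1000 * frame_length / sampling_rate)  # 32.0 -> 32 ms analysis window
```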
processor_config.json
ADDED
@@ -0,0 +1,5 @@
+{
+  "audio_seq_length": 188,
+  "image_seq_length": 256,
+  "processor_class": "Gemma3nProcessor"
+}
runs/Jul11_13-38-35_shivneri/events.out.tfevents.1752221320.shivneri
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d3508a1f01df86e8e27392c8c5aa5bb6550701677cf71ee817be2195bcbbf98
+size 9601
runs/Jul11_13-55-03_shivneri/events.out.tfevents.1752222309.shivneri
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d6b01d2e6f4d85cfd20ef8932730e2a6df859aa4d13b73438c1f445d61f4ae4
+size 9601
runs/Jul11_14-05-26_shivneri/events.out.tfevents.1752222931.shivneri
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:048ff600ab33258c946caad03b27c4304774835141f4cf99f5cbed4a0267ba6a
+size 9597
runs/Jul14_10-13-15_shivneri/events.out.tfevents.1752468217.shivneri
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0bfd7bd691f50f535824488e0c53703d7ea04500295d68da67deee0fce9a2d3
+size 9601
runs/Jul14_10-24-15_shivneri/events.out.tfevents.1752468859.shivneri
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f38933041225de7aced7b6ca2839badec650a5e34882f43c17eccedf86b97dfa
+size 9601
runs/Jul14_10-30-22_shivneri/events.out.tfevents.1752469226.shivneri
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4fa83838e8c3ba125a260808511494ec6b5fed6869a24e3148a1f865f641115a
+size 11011
special_tokens_map.json
ADDED
@@ -0,0 +1,36 @@
+{
+  "audio_token": "<audio_soft_token>",
+  "boa_token": "<start_of_audio>",
+  "boi_token": "<start_of_image>",
+  "bos_token": {
+    "content": "<bos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eoa_token": "<end_of_audio>",
+  "eoi_token": "<end_of_image>",
+  "eos_token": {
+    "content": "<eos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "image_token": "<image_soft_token>",
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
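Besides the usual bos/eos/pad/unk entries, the map registers paired audio and image delimiters plus the soft tokens the chat template above inserts for media items. A short sketch of checking that they resolve to real vocabulary ids, hedged and using the repo's tokenizer:

```python
# Sketch: verifying the multimodal special tokens exist in the vocabulary.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("kadcoder/medgemma-4b-oraclebio_prompt")
for token in ("<start_of_image>", "<image_soft_token>", "<start_of_audio>", "<audio_soft_token>"):
    print(token, tokenizer.convert_tokens_to_ids(token))
print("bos:", tokenizer.bos_token, "eos:", tokenizer.eos_token, "pad:", tokenizer.pad_token)
```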
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed504d44c6898db6d71f4abf77cf8e12a9ee3ac90bb1aec0e1edc012c4251b23
+size 33442708
tokenizer_config.json
ADDED
The diff for this file is too large to render.
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21ac031eb0a87aabba756d0f187da17faa495bc1746c19689784401172cace2e
+size 5816
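training_args.bin is a pickled TrainingArguments object rather than human-readable JSON, which is why it ships through LFS as an opaque 5.8 kB blob. A hedged sketch of inspecting it from a local clone; unpickling needs compatible transformers/trl versions, and weights_only=False is only safe for files you trust:

```python
# Sketch: loading the pickled training arguments from a local checkout.
# weights_only=False is required for arbitrary pickled objects; trust the source first.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```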