remove redundant contents
- config.json +5 -5
- model-00001-of-00004.safetensors +2 -2
- model.safetensors.index.json +1 -4
config.json
CHANGED

@@ -1,14 +1,14 @@
 {
-  "_name_or_path": "Magma-8B",
+  "_name_or_path": "microsoft/Magma-8B",
   "architectures": [
     "MagmaForCausalLM"
   ],
-  "auto_map": {
-    "AutoConfig": "configuration_magma.MagmaConfig",
-    "AutoModelForCausalLM": "modeling_magma.MagmaForCausalLM"
-  },
   "attention_bias": false,
   "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "microsoft/Magma-8B--configuration_magma.MagmaConfig",
+    "AutoModelForCausalLM": "microsoft/Magma-8B--modeling_magma.MagmaForCausalLM"
+  },
   "hidden_act": "silu",
   "hidden_size": 4096,
   "image_token_index": 128257,
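The rewritten auto_map entries use the Hub's repo--module.Class syntax, which pins the custom MagmaConfig / MagmaForCausalLM code to the microsoft/Magma-8B repo, so the classes resolve correctly even when the config is loaded through another repo or a local copy. A minimal loading sketch, assuming a recent transformers release with remote-code support:

from transformers import AutoConfig, AutoModelForCausalLM

# trust_remote_code is required: MagmaConfig/MagmaForCausalLM live in the
# repo's configuration_magma.py / modeling_magma.py, not in transformers itself.
config = AutoConfig.from_pretrained(
    "microsoft/Magma-8B",
    trust_remote_code=True,
)
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Magma-8B",
    trust_remote_code=True,
)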
model-00001-of-00004.safetensors
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:cf018c291b927b63801b3d036b96f553a351f085852db2b4c62025323e6e2bee
+size 4983726904
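For context, each .safetensors shard is stored in the repo as a Git LFS pointer like the one above: three key/value lines recording the spec version, the blob's sha256, and its byte size. A hedged sketch (not part of this commit; function names are illustrative) of parsing such a pointer and verifying a downloaded shard against it:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse the 'key value' lines of a git-lfs pointer file into a dict."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against the pointer's recorded size and sha256."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")  # Python 3.9+
    expected_size = int(fields["size"])
    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        # Stream in 1 MiB chunks; shards here are ~5 GB, too large to slurp.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return size == expected_size and digest.hexdigest() == expected_oid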
model.safetensors.index.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size":
+    "total_size": 17812436736
   },
   "weight_map": {
     "language_model.lm_head.weight": "model-00004-of-00004.safetensors",
@@ -298,9 +298,6 @@
     "multi_modal_projector.proj.0.weight": "model-00001-of-00004.safetensors",
     "multi_modal_projector.proj.2.bias": "model-00001-of-00004.safetensors",
     "multi_modal_projector.proj.2.weight": "model-00001-of-00004.safetensors",
-    "multi_modal_projector.proj_segtok.0.weight": "model-00001-of-00004.safetensors",
-    "multi_modal_projector.proj_segtok.2.weight": "model-00001-of-00004.safetensors",
-    "multi_modal_projector.proj_segtok.4.weight": "model-00001-of-00004.safetensors",
     "multi_modal_projector.row_seperator": "model-00001-of-00004.safetensors",
     "vision_tower.clip_vision_model.head.proj.weight": "model-00001-of-00004.safetensors",
     "vision_tower.clip_vision_model.trunk.head.norm.bias": "model-00001-of-00004.safetensors",
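The dropped proj_segtok entries remove weight_map references to tensors no longer shipped in the shards, and total_size (17,812,436,736 bytes, roughly 16.6 GiB across the four shards) is updated to match. A minimal consumer-side sketch of how weight_map routes a tensor name to its shard, assuming the index and shard files sit in the current directory and torch is installed:

import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

# weight_map: tensor name -> shard filename; only that shard is opened.
name = "multi_modal_projector.proj.0.weight"
shard = index["weight_map"][name]  # "model-00001-of-00004.safetensors"
with safe_open(shard, framework="pt") as st:
    tensor = st.get_tensor(name)
print(tensor.shape)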