| Column | Type |
|---|---|
| _leaderboard | string (1 distinct value) |
| _developer | string (559 distinct values) |
| _model | string (length 9-102) |
| _uuid | string (length 36) |
| schema_version | string (1 distinct value) |
| evaluation_id | string (length 35-133) |
| retrieved_timestamp | string (length 13-18) |
| source_data | string (1 distinct value) |
| evaluation_source_name | string (1 distinct value) |
| evaluation_source_type | string (1 distinct value) |
| source_organization_name | string (1 distinct value) |
| source_organization_url | null |
| source_organization_logo_url | null |
| evaluator_relationship | string (1 distinct value) |
| model_name | string (length 4-102) |
| model_id | string (length 9-102) |
| model_developer | string (559 distinct values) |
| model_inference_platform | string (1 distinct value) |
| evaluation_results | string (length 1.35k-1.41k) |
| additional_details | string (660 distinct values) |
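In the preview records below, the `evaluation_results` column holds a JSON array with one entry per benchmark (IFEval, BBH, MATH Level 5, GPQA, MUSR, MMLU-PRO), each carrying a `metric_config` and a `score_details.score`, while `additional_details` holds a JSON object with the model's precision, architecture, and parameter count. The sketch below shows one way such an `evaluation_results` cell might be parsed, assuming the cell is already available as a plain Python string; the `parse_evaluation_results` and `mean_score` helpers are illustrative names, and the example literal is shortened to two of the six entries from the first record below (nvidia/Minitron-4B-Base).

```python
import json

def parse_evaluation_results(cell: str) -> dict[str, float]:
    """Turn one evaluation_results cell (a JSON array of benchmark entries)
    into a {benchmark_name: score} mapping."""
    entries = json.loads(cell)
    return {e["evaluation_name"]: e["score_details"]["score"] for e in entries}

def mean_score(scores: dict[str, float]) -> float:
    """Unweighted mean over whatever benchmarks are present (illustration only;
    it need not match the leaderboard's own published average)."""
    return sum(scores.values()) / len(scores)

# Example cell, shortened to two of the six benchmark entries that appear in
# the first preview record; real cells carry all six benchmarks.
example_cell = (
    '[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": '
    '"Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", '
    '"min_score": 0, "max_score": 1}, "score_details": {"score": 0.2217937295265451}}, '
    '{"evaluation_name": "BBH", "metric_config": {"evaluation_description": '
    '"Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", '
    '"min_score": 0, "max_score": 1}, "score_details": {"score": 0.4083876243992497}}]'
)

scores = parse_evaluation_results(example_cell)
print(scores)              # {'IFEval': 0.2217937295265451, 'BBH': 0.4083876243992497}
print(mean_score(scores))  # 0.3150906769628974
```

The unweighted mean is only a convenience for comparing rows side by side; any official aggregate should be taken from the leaderboard itself.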
HF Open LLM v2
|
nvidia
|
nvidia/Minitron-4B-Base
|
f5e52953-2dfc-4661-81cd-ed96d7a52482
|
0.0.1
|
hfopenllm_v2/nvidia_Minitron-4B-Base/1762652580.415251
|
1762652580.415252
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nvidia/Minitron-4B-Base
|
nvidia/Minitron-4B-Base
|
nvidia
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2217937295265451}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4083876243992497}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.019637462235649546}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26929530201342283}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.413375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.261968085106383}}]
|
{"precision": "bfloat16", "architecture": "NemotronForCausalLM", "params_billions": 4.0}
|
HF Open LLM v2
|
nvidia
|
nvidia/AceInstruct-1.5B
|
a26b4b3f-aad1-4d2f-a97a-bf24850a3092
|
0.0.1
|
hfopenllm_v2/nvidia_AceInstruct-1.5B/1762652580.412246
|
1762652580.412247
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nvidia/AceInstruct-1.5B
|
nvidia/AceInstruct-1.5B
|
nvidia
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3947758613811354}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3931958135346713}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31268882175226587}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27181208053691275}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34600000000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2573969414893617}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.777}
|
HF Open LLM v2
|
nvidia
|
nvidia/Mistral-NeMo-Minitron-8B-Instruct
|
f4c299f0-d957-4784-8512-23f72a26a095
|
0.0.1
|
hfopenllm_v2/nvidia_Mistral-NeMo-Minitron-8B-Instruct/1762652580.415967
|
1762652580.415968
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nvidia/Mistral-NeMo-Minitron-8B-Instruct
|
nvidia/Mistral-NeMo-Minitron-8B-Instruct
|
nvidia
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5003889679384035}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5320919605840294}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1163141993957704}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.287751677852349}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38857291666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39910239361702127}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 8.414}
|
HF Open LLM v2
|
nvidia
|
nvidia/AceMath-7B-Instruct
|
e1c94d59-dfa4-49cf-9052-9ce6e713a0be
|
0.0.1
|
hfopenllm_v2/nvidia_AceMath-7B-Instruct/1762652580.413503
|
1762652580.413504
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nvidia/AceMath-7B-Instruct
|
nvidia/AceMath-7B-Instruct
|
nvidia
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45317756885064964}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.49938547326244365}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6336858006042296}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29194630872483224}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4192708333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33834773936170215}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
nvidia
|
nvidia/AceMath-72B-RM
|
5fdd0c8f-3393-4b59-8cc1-511c524c493a
|
0.0.1
|
hfopenllm_v2/nvidia_AceMath-72B-RM/1762652580.413297
|
1762652580.413298
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nvidia/AceMath-72B-RM
|
nvidia/AceMath-72B-RM
|
nvidia
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14125963554479892}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2717426350897727}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23406040268456377}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3351458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11785239361702128}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForSequenceClassification", "params_billions": 71.461}
|
HF Open LLM v2
|
nvidia
|
nvidia/AceMath-72B-Instruct
|
4ba1027b-f0c1-4ed9-aa30-35c4e01e564d
|
0.0.1
|
hfopenllm_v2/nvidia_AceMath-72B-Instruct/1762652580.413093
|
1762652580.4130938
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nvidia/AceMath-72B-Instruct
|
nvidia/AceMath-72B-Instruct
|
nvidia
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.494993284485166}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.640215611099268}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7145015105740181}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2709731543624161}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40615625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44107380319148937}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 72.706}
|
HF Open LLM v2
|
nvidia
|
nvidia/Minitron-8B-Base
|
3f6ec864-adf4-422f-85c1-19ef2417489a
|
0.0.1
|
hfopenllm_v2/nvidia_Minitron-8B-Base/1762652580.415456
|
1762652580.415456
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nvidia/Minitron-8B-Base
|
nvidia/Minitron-8B-Base
|
nvidia
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24242676099416216}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43950631883576047}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0256797583081571}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27348993288590606}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40255208333333337}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31806848404255317}}]
|
{"precision": "bfloat16", "architecture": "NemotronForCausalLM", "params_billions": 7.22}
|
HF Open LLM v2
|
nvidia
|
nvidia/Hymba-1.5B-Base
|
89f9149f-1f6d-4389-819a-d958b0ecc6b8
|
0.0.1
|
hfopenllm_v2/nvidia_Hymba-1.5B-Base/1762652580.4142
|
1762652580.4142022
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nvidia/Hymba-1.5B-Base
|
nvidia/Hymba-1.5B-Base
|
nvidia
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2295121389025563}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32564785214182224}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.013595166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2558724832214765}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3566354166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19223736702127658}}]
|
{"precision": "bfloat16", "architecture": "HymbaForCausalLM", "params_billions": 1.523}
|
HF Open LLM v2
|
nvidia
|
nvidia/AceInstruct-72B
|
08e924b1-121c-4ff7-bf1d-06b9cb90c7c0
|
0.0.1
|
hfopenllm_v2/nvidia_AceInstruct-72B/1762652580.4124959
|
1762652580.4124968
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nvidia/AceInstruct-72B
|
nvidia/AceInstruct-72B
|
nvidia
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.711888899231816}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6139041785911337}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6261329305135952}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3213087248322148}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42060416666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48736702127659576}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 72.706}
|
HF Open LLM v2
|
nvidia
|
nvidia/Nemotron-Mini-4B-Instruct
|
ab7ee3ac-4d47-4ec6-a2af-8a6f7eb96684
|
0.0.1
|
hfopenllm_v2/nvidia_Nemotron-Mini-4B-Instruct/1762652580.41618
|
1762652580.416181
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nvidia/Nemotron-Mini-4B-Instruct
|
nvidia/Nemotron-Mini-4B-Instruct
|
nvidia
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6668761109411916}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3864840798591535}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0256797583081571}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2802013422818792}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3767291666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26263297872340424}}]
|
{"precision": "bfloat16", "architecture": "NemotronForCausalLM", "params_billions": 4.0}
|
HF Open LLM v2
|
nvidia
|
nvidia/AceMath-7B-RM
|
ab9c685d-7b97-4bf4-bc0e-ffd5666e35d9
|
0.0.1
|
hfopenllm_v2/nvidia_AceMath-7B-RM/1762652580.4138508
|
1762652580.413853
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
nvidia/AceMath-7B-RM
|
nvidia/AceMath-7B-RM
|
nvidia
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14937809456686035}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2422689292768334}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24580536912751677}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35800000000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11386303191489362}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForSequenceClassification", "params_billions": 7.071}
|
HF Open LLM v2
|
SenseLLM
|
SenseLLM/ReflectionCoder-DS-33B
|
2ee4584d-b18c-44dd-af63-22c28b92e107
|
0.0.1
|
hfopenllm_v2/SenseLLM_ReflectionCoder-DS-33B/1762652579.878793
|
1762652579.878794
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
SenseLLM/ReflectionCoder-DS-33B
|
SenseLLM/ReflectionCoder-DS-33B
|
SenseLLM
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3786641666334215}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3449447540164568}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.030211480362537766}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27432885906040266}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3343125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12017952127659574}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 33.34}
|
HF Open LLM v2
|
SenseLLM
|
SenseLLM/ReflectionCoder-CL-34B
|
5d7a3d90-8017-4415-a1da-eb70f6145fe4
|
0.0.1
|
hfopenllm_v2/SenseLLM_ReflectionCoder-CL-34B/1762652579.8785448
|
1762652579.878546
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
SenseLLM/ReflectionCoder-CL-34B
|
SenseLLM/ReflectionCoder-CL-34B
|
SenseLLM
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4007710652180658}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39529304297033296}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.03323262839879154}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25083892617449666}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41548958333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14237034574468085}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 33.744}
|
HF Open LLM v2
|
aixonlab
|
aixonlab/Zara-14b-v1.2
|
a4c3ddcb-482c-47fb-9290-3c0678b38fb4
|
0.0.1
|
hfopenllm_v2/aixonlab_Zara-14b-v1.2/1762652579.979647
|
1762652579.979647
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
aixonlab/Zara-14b-v1.2
|
aixonlab/Zara-14b-v1.2
|
aixonlab
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6197400674654362}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6405368457456163}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35347432024169184}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38171140939597314}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46747916666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5263464095744681}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.766}
|
HF Open LLM v2
|
aixonlab
|
aixonlab/Aether-12b
|
831b6f81-1552-4a7b-acac-eb927001e440
|
0.0.1
|
hfopenllm_v2/aixonlab_Aether-12b/1762652579.979132
|
1762652579.979133
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
aixonlab/Aether-12b
|
aixonlab/Aether-12b
|
aixonlab
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23468286369056326}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5179400750435481}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10649546827794562}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3162751677852349}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38286458333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3410073138297872}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
aixonlab
|
aixonlab/Grey-12b
|
2c4626c7-3016-4641-9862-0ba4f7f7936c
|
0.0.1
|
hfopenllm_v2/aixonlab_Grey-12b/1762652579.979384
|
1762652579.9793851
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
aixonlab/Grey-12b
|
aixonlab/Grey-12b
|
aixonlab
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39679938119744496}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5698957505959833}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09818731117824774}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30033557046979864}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4516354166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3779089095744681}}]
|
{"precision": "float16", "architecture": "MistralForCausalLM", "params_billions": 12.248}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM2-135M
|
1761caca-524f-4d59-81dd-631e3e24e0e5
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM2-135M/1762652579.643546
|
1762652579.6435468
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM2-135M
|
HuggingFaceTB/SmolLM2-135M
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18177657504310785}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3044234246877141}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.012084592145015106}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2483221476510067}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4111770833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10945811170212766}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.135}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM2-1.7B
|
db57503c-bfe7-4691-983e-68af941e8b1e
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM2-1.7B/1762652579.6430368
|
1762652579.643038
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM2-1.7B
|
HuggingFaceTB/SmolLM2-1.7B
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2440003634800108}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3452594377166261}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.026435045317220542}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27936241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3485416666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2137632978723404}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.71}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM-135M-Instruct
|
adff7af4-9bae-420a-9751-9f68ab81bf99
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM-135M-Instruct/1762652579.642397
|
1762652579.6423979
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM-135M-Instruct
|
HuggingFaceTB/SmolLM-135M-Instruct
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.12140121544169469}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30150816789978757}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.005287009063444109}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25922818791946306}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36345833333333327}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11760305851063829}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.135}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM-1.7B
|
e1b7c18a-bff1-44a3-b589-95bcb0f88e36
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM-1.7B/1762652579.6417458
|
1762652579.6417458
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM-1.7B
|
HuggingFaceTB/SmolLM-1.7B
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23615673080759053}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3180516538964782}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.01661631419939577}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24161073825503357}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34209375000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11477726063829788}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.71}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM2-360M-Instruct
|
06409b6c-9d26-4bee-af75-16e6edb87a93
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM2-360M-Instruct/1762652579.644474
|
1762652579.644475
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM2-360M-Instruct
|
HuggingFaceTB/SmolLM2-360M-Instruct
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08303191088533979}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3052703401844317}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.008308157099697885}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2651006711409396}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34228125000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11261635638297872}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 0.362}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM2-360M-Instruct
|
09ba6e80-5ab4-4c8c-b7ad-c1497413c207
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM2-360M-Instruct/1762652579.6446972
|
1762652579.6446981
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM2-360M-Instruct
|
HuggingFaceTB/SmolLM2-360M-Instruct
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38415958545548035}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31435050538888504}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.015105740181268883}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2550335570469799}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.346125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11170212765957446}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.36}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM2-360M
|
7751b65d-2bba-465c-9a1e-5ae51d94fcf6
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM2-360M/1762652579.6442492
|
1762652579.6442502
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM2-360M
|
HuggingFaceTB/SmolLM2-360M
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21145227995053123}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3233478044302361}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.012084592145015106}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24580536912751677}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3954270833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11693816489361702}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.36}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM-135M
|
8cd60e42-3429-4938-b43e-9c951a57ca9f
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM-135M/1762652579.642195
|
1762652579.642196
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM-135M
|
HuggingFaceTB/SmolLM-135M
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21247622973709757}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3046054260062988}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.013595166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25838926174496646}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4366041666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11220079787234043}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.13}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM-360M
|
236f7bdd-be50-4287-82b7-6efddc9dd3f4
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM-360M/1762652579.642613
|
1762652579.6426141
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM-360M
|
HuggingFaceTB/SmolLM-360M
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2133505764704318}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30645160333152527}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.011329305135951661}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2676174496644295}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40178125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11236702127659574}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.36}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM-1.7B-Instruct
|
690a5844-000e-4949-bbf9-8bd1ff2cb1bd
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM-1.7B-Instruct/1762652579.641991
|
1762652579.641991
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM-1.7B-Instruct
|
HuggingFaceTB/SmolLM-1.7B-Instruct
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23478259905938464}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28851114363217695}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.021148036253776436}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2600671140939597}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3486666666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11660571808510638}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.71}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM2-1.7B-Instruct
|
09b81183-8ff2-44d5-a515-63cddc3e55c6
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM2-1.7B-Instruct/1762652579.643299
|
1762652579.6433
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM2-1.7B-Instruct
|
HuggingFaceTB/SmolLM2-1.7B-Instruct
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5367835121920947}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3598617531415158}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0581570996978852}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27936241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.342125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2053690159574468}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 1.711}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM-360M-Instruct
|
ec13c105-c846-4420-91af-d42e98b7a818
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM-360M-Instruct/1762652579.642821
|
1762652579.642821
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM-360M-Instruct
|
HuggingFaceTB/SmolLM-360M-Instruct
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19516549422199764}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28851114363217695}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.01812688821752266}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26426174496644295}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34717708333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11660571808510638}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.362}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM2-135M-Instruct
|
9a9fb17d-49ae-4a82-95c8-c8b55923d72f
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM2-135M-Instruct/1762652579.644038
|
1762652579.644039
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM2-135M-Instruct
|
HuggingFaceTB/SmolLM2-135M-Instruct
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.05925167444602544}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.31347502947335903}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.014350453172205438}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23406040268456377}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3871458333333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.10920877659574468}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 0.135}
|
HF Open LLM v2
|
HuggingFaceTB
|
HuggingFaceTB/SmolLM2-135M-Instruct
|
df60b16b-184c-43d9-ac79-8627f09d265b
|
0.0.1
|
hfopenllm_v2/HuggingFaceTB_SmolLM2-135M-Instruct/1762652579.643796
|
1762652579.643796
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
HuggingFaceTB/SmolLM2-135M-Instruct
|
HuggingFaceTB/SmolLM2-135M-Instruct
|
HuggingFaceTB
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2883138960181208}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3124321328066677}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0030211480362537764}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23573825503355705}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36621875000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11145279255319149}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.135}
|
HF Open LLM v2
|
davidkim205
|
davidkim205/Rhea-72b-v0.5
|
106de4e2-a8d3-40d3-bdbc-0b95930e9ba6
|
0.0.1
|
hfopenllm_v2/davidkim205_Rhea-72b-v0.5/1762652580.1208682
|
1762652580.1208699
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
davidkim205/Rhea-72b-v0.5
|
davidkim205/Rhea-72b-v0.5
|
davidkim205
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.014538092261865185}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30783395929068597}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17371601208459214}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2525167785234899}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42413541666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11660571808510638}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 72.0}
|
HF Open LLM v2
|
davidkim205
|
davidkim205/nox-solar-10.7b-v4
|
fcc755d0-6269-49e6-890b-4a14417601a1
|
0.0.1
|
hfopenllm_v2/davidkim205_nox-solar-10.7b-v4/1762652580.1212
|
1762652580.1212008
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
davidkim205/nox-solar-10.7b-v4
|
davidkim205/nox-solar-10.7b-v4
|
davidkim205
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3753418706809044}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4814038018918371}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.008308157099697885}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3070469798657718}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42984375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3332779255319149}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 10.732}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/Research_PathfinderAI
|
900e5686-083d-460c-918f-06a39936810c
|
0.0.1
|
hfopenllm_v2/Daemontatox_Research_PathfinderAI/1762652579.530894
|
1762652579.530895
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/Research_PathfinderAI
|
Daemontatox/Research_PathfinderAI
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3456916537010687}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.287225755504323}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16993957703927492}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2407718120805369}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.33939583333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11303191489361702}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.777}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/mini_Pathfinder
|
a9afd0b3-8189-47e0-9e33-d60540679e20
|
0.0.1
|
hfopenllm_v2/Daemontatox_mini_Pathfinder/1762652579.53272
|
1762652579.5327208
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/mini_Pathfinder
|
Daemontatox/mini_Pathfinder
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29615752869054107}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39556911910803755}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47507552870090636}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25838926174496646}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37809374999999995}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28091755319148937}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/AetherUncensored
|
574d79eb-94ae-4b79-8763-77267d300670
|
0.0.1
|
hfopenllm_v2/Daemontatox_AetherUncensored/1762652579.525634
|
1762652579.5256362
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/AetherUncensored
|
Daemontatox/AetherUncensored
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40419309653940433}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44631282805144945}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14501510574018128}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28859060402684567}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3746770833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27102726063829785}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/CogitoDistil
|
f39e1ca4-2a0f-4650-886b-4160760daee5
|
0.0.1
|
hfopenllm_v2/Daemontatox_CogitoDistil/1762652579.526295
|
1762652579.5262961
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/CogitoDistil
|
Daemontatox/CogitoDistil
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27764775240805506}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36767660461416857}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39274924471299094}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25922818791946306}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3754895833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2625498670212766}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/Zirel_1.5
|
661e2393-2560-4d25-a6f3-f0d680052e8e
|
0.0.1
|
hfopenllm_v2/Daemontatox_Zirel_1.5/1762652579.532257
|
1762652579.532258
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/Zirel_1.5
|
Daemontatox/Zirel_1.5
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4167575366693706}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3984669254999634}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11329305135951662}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2600671140939597}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.36581250000000004}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21434507978723405}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/PathFinderAI2.0
|
274ab6b9-5fd7-41df-9076-b16c52947640
|
0.0.1
|
hfopenllm_v2/Daemontatox_PathFinderAI2.0/1762652579.528686
|
1762652579.528686
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/PathFinderAI2.0
|
Daemontatox/PathFinderAI2.0
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.45410178326839457}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.665823006477417}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5075528700906344}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30201342281879195}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4215625}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5546875}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/PathfinderAI
|
c07f2943-f3f4-46be-993e-be56dadcb561
|
0.0.1
|
hfopenllm_v2/Daemontatox_PathfinderAI/1762652579.5294342
|
1762652579.529435
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/PathfinderAI
|
Daemontatox/PathfinderAI
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4855006937148987}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6627335380624046}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48413897280966767}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30956375838926176}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42559375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.554188829787234}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/PathfinderAI
|
445f2c79-2c47-465c-ace7-73b3fa491454
|
0.0.1
|
hfopenllm_v2/Daemontatox_PathfinderAI/1762652579.529176
|
1762652579.5291772
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/PathfinderAI
|
Daemontatox/PathfinderAI
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37451739163198094}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6667854331232542}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47583081570996977}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39429530201342283}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48583333333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.559341755319149}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/PixelParse_AI
|
29459932-a7a5-458f-9778-e236cc4ea985
|
0.0.1
|
hfopenllm_v2/Daemontatox_PixelParse_AI/1762652579.529871
|
1762652579.529872
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/PixelParse_AI
|
Daemontatox/PixelParse_AI
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43829040279790954}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5034307630533988}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1472809667673716}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3238255033557047}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40518750000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37782579787234044}}]
|
{"precision": "bfloat16", "architecture": "MllamaForConditionalGeneration", "params_billions": 10.67}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/CogitoZ
|
5e08074c-32bd-4ce6-a09f-7b5832cba288
|
0.0.1
|
hfopenllm_v2/Daemontatox_CogitoZ/1762652579.5265448
|
1762652579.526546
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/CogitoZ
|
Daemontatox/CogitoZ
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3967240255854466}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6734487392645502}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5241691842900302}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3951342281879195}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4792604166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5592586436170213}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/ReasonTest
|
39d481bf-ea86-42a7-a6f1-ce38ce9dce30
|
0.0.1
|
hfopenllm_v2/Daemontatox_ReasonTest/1762652579.530685
|
1762652579.530686
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/ReasonTest
|
Daemontatox/ReasonTest
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4079653098223824}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.543526397621609}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21374622356495468}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3187919463087248}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43154166666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4271941489361702}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 3.808}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/Zirel-7B-Math
|
460de6c8-d706-420b-9c0a-a108ddb11e5f
|
0.0.1
|
hfopenllm_v2/Daemontatox_Zirel-7B-Math/1762652579.531958
|
1762652579.531959
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/Zirel-7B-Math
|
Daemontatox/Zirel-7B-Math
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6638785090227264}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5447698777469486}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.19788519637462235}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3263422818791946}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47891666666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4237034574468085}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/AetherDrake-SFT
|
843cbaa0-5d9d-47a8-ae69-fe38a5812136
|
0.0.1
|
hfopenllm_v2/Daemontatox_AetherDrake-SFT/1762652579.524555
|
1762652579.524556
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/AetherDrake-SFT
|
Daemontatox/AetherDrake-SFT
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4812796712722244}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.48720075507220245}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1510574018126888}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32046979865771813}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40884375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34990026595744683}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 8.03}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/AetherTOT
|
8ac4547d-2b57-4227-a63d-05da4f3ccbc7
|
0.0.1
|
hfopenllm_v2/Daemontatox_AetherTOT/1762652579.5251331
|
1762652579.5251389
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/AetherTOT
|
Daemontatox/AetherTOT
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4397642699149368}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5066056342472064}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1487915407854985}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3238255033557047}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4078541666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38040226063829785}}]
|
{"precision": "float16", "architecture": "MllamaForConditionalGeneration", "params_billions": 10.67}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/AetherTOT
|
fa9282c6-7820-49dd-9893-9559c5a984a9
|
0.0.1
|
hfopenllm_v2/Daemontatox_AetherTOT/1762652579.5253801
|
1762652579.525381
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/AetherTOT
|
Daemontatox/AetherTOT
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43829040279790954}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5034307630533988}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14425981873111782}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3238255033557047}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40518750000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37782579787234044}}]
|
{"precision": "bfloat16", "architecture": "MllamaForConditionalGeneration", "params_billions": 10.67}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/DocumentCogito
|
9a638bb6-f16f-496b-a974-d97dbb6cd626
|
0.0.1
|
hfopenllm_v2/Daemontatox_DocumentCogito/1762652579.527227
|
1762652579.5272279
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/DocumentCogito
|
Daemontatox/DocumentCogito
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7770349339751859}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5186726621665779}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21978851963746224}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2936241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39105208333333336}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3737533244680851}}]
|
{"precision": "float16", "architecture": "MllamaForConditionalGeneration", "params_billions": 10.67}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/DocumentCogito
|
6d2a742b-adde-4b6d-90d4-ebefbb2b61be
|
0.0.1
|
hfopenllm_v2/Daemontatox_DocumentCogito/1762652579.5270069
|
1762652579.527008
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/DocumentCogito
|
Daemontatox/DocumentCogito
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5064340394597445}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5111563719111275}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16314199395770393}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3162751677852349}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3973125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38023603723404253}}]
|
{"precision": "bfloat16", "architecture": "MllamaForConditionalGeneration", "params_billions": 10.67}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/CogitoZ14
|
024f23d8-66b0-4a7b-be01-fd68f0ab295e
|
0.0.1
|
hfopenllm_v2/Daemontatox_CogitoZ14/1762652579.526777
|
1762652579.5267782
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/CogitoZ14
|
Daemontatox/CogitoZ14
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6637034180419066}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6297514788808327}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42220543806646527}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3162751677852349}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.405875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39993351063829785}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 14.77}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/RA2.0
|
3baf9882-5625-47eb-a88b-b172dfc9a330
|
0.0.1
|
hfopenllm_v2/Daemontatox_RA2.0/1762652579.53008
|
1762652579.530081
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/RA2.0
|
Daemontatox/RA2.0
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37838934028378035}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4888687006782508}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.38368580060422963}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3053691275167785}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40912499999999996}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26163563829787234}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/MawaredT1
|
1e87d1ea-59df-4c1a-96da-31e12e27dab2
|
0.0.1
|
hfopenllm_v2/Daemontatox_MawaredT1/1762652579.527918
|
1762652579.527919
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/MawaredT1
|
Daemontatox/MawaredT1
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41988036188424493}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5214815439293661}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3021148036253776}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3347315436241611}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.47020833333333334}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4718251329787234}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/RA_Reasoner
|
ab74d5ca-6c80-44de-96e9-af61861090b6
|
0.0.1
|
hfopenllm_v2/Daemontatox_RA_Reasoner/1762652579.530283
|
1762652579.530284
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/RA_Reasoner
|
Daemontatox/RA_Reasoner
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.559215104810791}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6053692417205033}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2122356495468278}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3313758389261745}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3963541666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43001994680851063}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 10.306}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/Mini_QwQ
|
7d5c59eb-c6fb-414a-9e4e-44d1d56f7401
|
0.0.1
|
hfopenllm_v2/Daemontatox_Mini_QwQ/1762652579.528199
|
1762652579.5282
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/Mini_QwQ
|
Daemontatox/Mini_QwQ
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.44970566984490046}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.554898906584336}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.41918429003021146}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3036912751677852}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46825}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.437250664893617}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/Cogito-MIS
|
822268e0-8f66-4bb3-9d01-52c684ca281f
|
0.0.1
|
hfopenllm_v2/Daemontatox_Cogito-MIS/1762652579.525943
|
1762652579.5259452
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/Cogito-MIS
|
Daemontatox/Cogito-MIS
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18145188100905596}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5059981143086196}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08610271903323263}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25671140939597314}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.37676041666666665}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14353390957446807}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 23.572}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/NemoR
|
a2da90e0-5f59-4c89-b819-316d2cc318be
|
0.0.1
|
hfopenllm_v2/Daemontatox_NemoR/1762652579.528459
|
1762652579.528459
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/NemoR
|
Daemontatox/NemoR
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2287375275380435}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5194067688446361}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.08308157099697885}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3271812080536913}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39080208333333327}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32903922872340424}}]
|
{"precision": "bfloat16", "architecture": "MistralForCausalLM", "params_billions": 6.124}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/RA_Reasoner2.0
|
5cf9872a-6d67-4b42-bfe4-abad05bdd9cf
|
0.0.1
|
hfopenllm_v2/Daemontatox_RA_Reasoner2.0/1762652579.530484
|
1762652579.530485
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/RA_Reasoner2.0
|
Daemontatox/RA_Reasoner2.0
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5366339091388627}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6062469551969276}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2311178247734139}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32466442953020136}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3883541666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4353390957446808}}]
|
{"precision": "float16", "architecture": "LlamaForCausalLM", "params_billions": 10.306}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/AetherSett
|
791a8f9f-5c85-42e5-a06d-270118b0c7c2
|
0.0.1
|
hfopenllm_v2/Daemontatox_AetherSett/1762652579.524883
|
1762652579.524884
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/AetherSett
|
Daemontatox/AetherSett
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5369586031729146}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5451624435465484}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3972809667673716}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.30788590604026844}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.46031249999999996}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4278590425531915}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/mini-Cogito-R1
|
faac8ed1-1042-42dc-9762-3f90161fb34f
|
0.0.1
|
hfopenllm_v2/Daemontatox_mini-Cogito-R1/1762652579.532486
|
1762652579.532487
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/mini-Cogito-R1
|
Daemontatox/mini-Cogito-R1
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2298368329366082}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3280491875175077}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27492447129909364}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.28691275167785235}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34469791666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.14818816489361702}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.777}
|
HF Open LLM v2
|
Daemontatox
|
Daemontatox/PathFinderAi3.0
|
ba3924c6-f913-4094-a56a-1699f07f103c
|
0.0.1
|
hfopenllm_v2/Daemontatox_PathFinderAi3.0/1762652579.5289202
|
1762652579.5289202
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
Daemontatox/PathFinderAi3.0
|
Daemontatox/PathFinderAi3.0
|
Daemontatox
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.42709898624538445}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6884221416328996}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5045317220543807}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4085570469798658}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4806875}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5757147606382979}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764}
|
HF Open LLM v2
|
experiment-llm
|
experiment-llm/exp-3-q-r
|
7d72dcb1-bc5d-41bf-b333-c21e67b0acd2
|
0.0.1
|
hfopenllm_v2/experiment-llm_exp-3-q-r/1762652580.148931
|
1762652580.148932
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
experiment-llm/exp-3-q-r
|
experiment-llm/exp-3-q-r
|
experiment-llm
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6035785050333116}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5397159253811645}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.27870090634441086}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2936241610738255}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43154166666666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43159906914893614}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 7.616}
|
HF Open LLM v2
|
goulue5
|
goulue5/merging_LLM
|
a7fb7d77-93c3-41c8-a85a-692953dcd2c6
|
0.0.1
|
hfopenllm_v2/goulue5_merging_LLM/1762652580.1806688
|
1762652580.18067
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
goulue5/merging_LLM
|
goulue5/merging_LLM
|
goulue5
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.32326006108237254}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4216498611590102}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.09667673716012085}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2911073825503356}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43328125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29579454787234044}}]
|
{"precision": "float16", "architecture": "Qwen2ForCausalLM", "params_billions": 1.544}
|
HF Open LLM v2
|
cpayne1303
|
cpayne1303/cp2024-instruct
|
247e1c1e-ce27-4645-a2ae-4177f08ea4a5
|
0.0.1
|
hfopenllm_v2/cpayne1303_cp2024-instruct/1762652580.116854
|
1762652580.116854
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
cpayne1303/cp2024-instruct
|
cpayne1303/cp2024-instruct
|
cpayne1303
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.17061064641817045}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2946778102988436}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.0}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2600671140939597}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3686354166666666}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11668882978723404}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.031}
|
HF Open LLM v2
|
cpayne1303
|
cpayne1303/smallcp2024
|
fcbede38-3a5b-4cd7-b144-cbf26cc05df9
|
0.0.1
|
hfopenllm_v2/cpayne1303_smallcp2024/1762652580.117528
|
1762652580.117528
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
cpayne1303/smallcp2024
|
cpayne1303/smallcp2024
|
cpayne1303
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1581958093414363}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3027047714604053}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.005287009063444109}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23070469798657717}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.34246874999999993}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11136968085106383}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.002}
|
HF Open LLM v2
|
cpayne1303
|
cpayne1303/cp2024
|
2bfb7bea-a344-4249-8bdc-e6c483518df5
|
0.0.1
|
hfopenllm_v2/cpayne1303_cp2024/1762652580.116582
|
1762652580.1165829
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
cpayne1303/cp2024
|
cpayne1303/cp2024
|
cpayne1303
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16581448334862608}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.29853854089245085}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.005287009063444109}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2558724832214765}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3383125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11012300531914894}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 0.031}
|
HF Open LLM v2
|
macadeliccc
|
macadeliccc/magistrate-3.2-3b-base
|
e0f596ba-89ee-4fa7-b5dc-698c2a5fda95
|
0.0.1
|
hfopenllm_v2/macadeliccc_magistrate-3.2-3b-base/1762652580.32929
|
1762652580.329291
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
macadeliccc/magistrate-3.2-3b-base
|
macadeliccc/magistrate-3.2-3b-base
|
macadeliccc
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1159301763764589}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3342701056047533}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.011329305135951661}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2609060402684564}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.39759374999999997}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.16888297872340424}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.213}
|
HF Open LLM v2
|
macadeliccc
|
macadeliccc/magistrate-3.2-3b-it
|
df26db97-8e5e-409e-937d-45951c81a8cd
|
0.0.1
|
hfopenllm_v2/macadeliccc_magistrate-3.2-3b-it/1762652580.329552
|
1762652580.329552
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
macadeliccc/magistrate-3.2-3b-it
|
macadeliccc/magistrate-3.2-3b-it
|
macadeliccc
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22918744486850445}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3256506790327196}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.019637462235649546}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24748322147651006}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3763229166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.15924202127659576}}]
|
{"precision": "bfloat16", "architecture": "LlamaForCausalLM", "params_billions": 3.213}
|
HF Open LLM v2
|
EleutherAI
|
EleutherAI/pythia-6.9b
|
6ae207e3-2596-4b28-b058-d47d07465192
|
0.0.1
|
hfopenllm_v2/EleutherAI_pythia-6.9b/1762652579.595358
|
1762652579.595359
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
EleutherAI/pythia-6.9b
|
EleutherAI/pythia-6.9b
|
EleutherAI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.22811362739752744}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3232287869322383}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.014350453172205438}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2516778523489933}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3590520833333333}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.1146941489361702}}]
|
{"precision": "float16", "architecture": "GPTNeoXForCausalLM", "params_billions": 6.9}
|
HF Open LLM v2
|
EleutherAI
|
EleutherAI/pythia-1.4b
|
e268be37-589d-41f2-af98-a85bb412eb44
|
0.0.1
|
hfopenllm_v2/EleutherAI_pythia-1.4b/1762652579.593903
|
1762652579.593904
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
EleutherAI/pythia-1.4b
|
EleutherAI/pythia-1.4b
|
EleutherAI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.23708094522533543}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.315042649740714}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.015105740181268883}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.26174496644295303}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35378125}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11228390957446809}}]
|
{"precision": "float16", "architecture": "GPTNeoXForCausalLM", "params_billions": 1.515}
|
HF Open LLM v2
|
EleutherAI
|
EleutherAI/pythia-2.8b
|
0afcbde6-b822-4264-8733-bc255ea73314
|
0.0.1
|
hfopenllm_v2/EleutherAI_pythia-2.8b/1762652579.594833
|
1762652579.5948339
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
EleutherAI/pythia-2.8b
|
EleutherAI/pythia-2.8b
|
EleutherAI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21732226049105263}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3224085936276087}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.013595166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3485729166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11369680851063829}}]
|
{"precision": "float16", "architecture": "GPTNeoXForCausalLM", "params_billions": 2.909}
|
HF Open LLM v2
|
EleutherAI
|
EleutherAI/pythia-12b
|
4df16bb2-996f-473f-9096-a8a8e152ca9b
|
0.0.1
|
hfopenllm_v2/EleutherAI_pythia-12b/1762652579.5942001
|
1762652579.594201
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
EleutherAI/pythia-12b
|
EleutherAI/pythia-12b
|
EleutherAI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24714756845170813}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3179653957935337}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.01661631419939577}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.24664429530201343}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3646979166666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11087101063829788}}]
|
{"precision": "float16", "architecture": "GPTNeoXForCausalLM", "params_billions": 12.0}
|
HF Open LLM v2
|
EleutherAI
|
EleutherAI/pythia-160m
|
d59ad4b0-e58e-48d6-90eb-93398c46251a
|
0.0.1
|
hfopenllm_v2/EleutherAI_pythia-160m/1762652579.5944068
|
1762652579.594408
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
EleutherAI/pythia-160m
|
EleutherAI/pythia-160m
|
EleutherAI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.18155161637787737}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2970437484241321}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.00906344410876133}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25838926174496646}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4179375}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11195146276595745}}]
|
{"precision": "float16", "architecture": "GPTNeoXForCausalLM", "params_billions": 0.213}
|
HF Open LLM v2
|
EleutherAI
|
EleutherAI/pythia-1b
|
a21cc55c-e9df-46ef-beed-b67a1750ddb7
|
0.0.1
|
hfopenllm_v2/EleutherAI_pythia-1b/1762652579.594618
|
1762652579.594618
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
EleutherAI/pythia-1b
|
EleutherAI/pythia-1b
|
EleutherAI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.2207941594968018}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3004093017564394}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.00906344410876133}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25671140939597314}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35520833333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11361369680851063}}]
|
{"precision": "float16", "architecture": "GPTNeoXForCausalLM", "params_billions": 1.079}
|
HF Open LLM v2
|
EleutherAI
|
EleutherAI/pythia-410m
|
c9db5f06-9aac-4678-bfe0-65773ece4558
|
0.0.1
|
hfopenllm_v2/EleutherAI_pythia-410m/1762652579.5950441
|
1762652579.595045
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
EleutherAI/pythia-410m
|
EleutherAI/pythia-410m
|
EleutherAI
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.21954525104500505}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.302813387064426}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.009818731117824773}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.25922818791946306}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.35781250000000003}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.11278257978723404}}]
|
{"precision": "float16", "architecture": "GPTNeoXForCausalLM", "params_billions": 0.506}
|
HF Open LLM v2
|
zetasepic
|
zetasepic/Qwen2.5-72B-Instruct-abliterated
|
78799fe1-5fbd-4023-9462-8d826dac41d5
|
0.0.1
|
hfopenllm_v2/zetasepic_Qwen2.5-72B-Instruct-abliterated/1762652580.632342
|
1762652580.632343
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
zetasepic/Qwen2.5-72B-Instruct-abliterated
|
zetasepic/Qwen2.5-72B-Instruct-abliterated
|
zetasepic
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7152610628687439}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.7152257183282452}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5241691842900302}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.40687919463087246}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.4719166666666667}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5871841755319149}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 72.706}
|
HF Open LLM v2
|
zetasepic
|
zetasepic/Qwen2.5-32B-Instruct-abliterated-v2
|
a5490bf2-6d11-4474-b6e5-07a79d30f431
|
0.0.1
|
hfopenllm_v2/zetasepic_Qwen2.5-32B-Instruct-abliterated-v2/1762652580.6318998
|
1762652580.631902
|
["https://open-llm-leaderboard-open-llm-leaderboard.hf.space/api/leaderboard/formatted"]
|
HF Open LLM v2
|
leaderboard
|
Hugging Face
| null | null |
third_party
|
zetasepic/Qwen2.5-32B-Instruct-abliterated-v2
|
zetasepic/Qwen2.5-32B-Instruct-abliterated-v2
|
zetasepic
|
unknown
|
[{"evaluation_name": "IFEval", "metric_config": {"evaluation_description": "Accuracy on IFEval", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.8334131216283904}}, {"evaluation_name": "BBH", "metric_config": {"evaluation_description": "Accuracy on BBH", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.6934020817780425}}, {"evaluation_name": "MATH Level 5", "metric_config": {"evaluation_description": "Exact Match on MATH Level 5", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.595166163141994}}, {"evaluation_name": "GPQA", "metric_config": {"evaluation_description": "Accuracy on GPQA", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.3674496644295302}}, {"evaluation_name": "MUSR", "metric_config": {"evaluation_description": "Accuracy on MUSR", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.43542708333333335}}, {"evaluation_name": "MMLU-PRO", "metric_config": {"evaluation_description": "Accuracy on MMLU-PRO", "lower_is_better": false, "score_type": "continuous", "min_score": 0, "max_score": 1}, "score_details": {"score": 0.5621675531914894}}]
|
{"precision": "bfloat16", "architecture": "Qwen2ForCausalLM", "params_billions": 32.764}
|