helenai committed
Commit cc9720d · 1 Parent(s): cbe2ad1

Initial commit

.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.github/workflows/space.yml ADDED
@@ -0,0 +1,45 @@
+ name: Bump requirements.txt # bump requirements.txt (to force reinstall in space), run tests, push to HF hub
+
+ on:
+   schedule:
+     - cron: '0 05 * * *' # daily at 05:00 UTC
+   workflow_dispatch:
+
+ jobs:
+   update-and-sync:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v3
+         with:
+           fetch-depth: 0
+           token: ${{ secrets.GH_TOKEN }}
+       - name: Setup Python 3.12
+         uses: actions/setup-python@v5
+         with:
+           python-version: "3.12"
+       - name: Add timestamp
+         run: |
+           python - <<'PY'
+           import datetime
+           with open("requirements.txt", "r", encoding="utf-8") as f:
+               req_content = f.read()
+           lines = [line for line in req_content.splitlines() if not line.startswith("# bumped:")]
+           ts = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+           lines.append(f"# bumped: {ts}")
+           new_req = "\n".join(lines) + "\n"
+           with open("requirements.txt", "w", encoding="utf-8") as f:
+               f.write(new_req)
+           PY
+           git config user.name "github-actions[bot]"
+           git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
+           git add requirements.txt
+           git commit -m "ci: bump timestamp to trigger deps reinstall"
+           git push origin HEAD:main
+       - name: Install requirements
+         run: pip install -r requirements.txt
+       - name: Run test
+         run: python -m pytest test_optimum_support.py
+       - name: Push to hub
+         env:
+           HF_TOKEN: ${{ secrets.HF_TOKEN }}
+         run: git push https://helenai:[email protected]/spaces/helenai/check-optimum-intel-support main
README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: Check Optimum Intel Support
+ emoji: 🌖
+ colorFrom: gray
+ colorTo: red
+ sdk: gradio
+ sdk_version: 6.0.2
+ app_file: app.py
+ pinned: false
+ short_description: Check if a model is supported by optimum-intel[openvino]
+ ---
+
app.py ADDED
@@ -0,0 +1,19 @@
+ import importlib.metadata
+ import gradio as gr
+ from optimum_support import show_is_supported
+
+ with gr.Blocks() as app:
+     gr.Markdown("# Check if model is supported by optimum-intel[openvino]")
+     with gr.Column():
+         model_id = gr.Textbox(label="model_id")
+         process_button = gr.Button("Check")
+         output_text = gr.Markdown(label="result", height=100)
+         optimum_intel_version = importlib.metadata.version("optimum-intel")
+         gr.Markdown(
+             f"Tested with optimum-intel {optimum_intel_version}. For testing purposes only, results may be wrong."
+         )
+
+     process_button.click(show_is_supported, inputs=[model_id], outputs=output_text)
+     model_id.submit(show_is_supported, inputs=[model_id], outputs=output_text)
+
+ app.launch()
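
For a quick local check outside the Gradio UI, the same entry point can be called directly. A minimal sketch, assuming the packages from requirements.txt are installed:

```python
# Smoke test of the Space's core function, bypassing Gradio.
from optimum_support import show_is_supported

print(show_is_supported("openai/whisper-small"))  # expected to report "is **supported**"
```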
optimum_support.py ADDED
@@ -0,0 +1,109 @@
+ import importlib.metadata
+ import inspect
+ import re
+ import subprocess
+ import sys
+ from importlib import reload
+ from pathlib import Path
+
+ import optimum.intel.utils.import_utils as import_utils
+ from huggingface_hub import HfApi
+ from huggingface_hub.errors import RepositoryNotFoundError
+ from packaging.requirements import Requirement
+
+ if Path("optimum-intel").is_dir():
+     subprocess.run(["git", "pull"], cwd="optimum-intel")
+ else:
+     subprocess.run(["git", "clone", "https://github.com/huggingface/optimum-intel.git"])
+ test_path = Path(__file__).parent / "optimum-intel" / "tests" / "openvino"
+ sys.path.append(str(test_path))
+
+ # Stable Diffusion models do not have a model_type in the config.
+ # Get supported diffusion classes from SUPPORTED_OV_PIPELINES, which lists all OV pipeline wrappers.
+ import optimum.intel.openvino.modeling_diffusion as _modeling_diffusion
+ # Import the test modules from the cloned repository. These must be imported globally to avoid issues with reloading in Gradio.
+ import test_decoder
+ import test_diffusion
+ import test_modeling
+ import test_seq2seq
+
+ SUPPORTED_DIFFUSION_CLASSES = [
+     cls.auto_model_class.__name__
+     for cls in _modeling_diffusion.SUPPORTED_OV_PIPELINES
+     if hasattr(cls, "auto_model_class") and cls.auto_model_class is not None
+ ]
+
+
+ def get_supported_models_for_version(version):
+     import_utils._transformers_version = version
+     test_seq2seq._transformers_version = version
+     test_modeling._transformers_version = version
+     test_diffusion._transformers_version = version
+     test_decoder._transformers_version = version
+
+     seq2seq = reload(test_seq2seq)
+     decoder = reload(test_decoder)
+     modeling = reload(test_modeling)
+     diffusion = reload(test_diffusion)
+
+     d = {}
+     modules = [seq2seq, decoder, modeling, diffusion]
+     for mod in modules:
+         for name, obj in inspect.getmembers(mod):
+             if inspect.isclass(obj):
+                 if re.match(r"(OVModelFor.*IntegrationTest)", name) or re.match(r"(OVPipelineFor.*Test)", name):
+                     task = name.replace("IntegrationTest", "").replace("Test", "")
+                     if "CustomTasks" not in task:
+                         d[task] = obj.SUPPORTED_ARCHITECTURES
+     all_archs = []
+     for archs in d.values():
+         all_archs += archs
+     return sorted(set(all_archs))
+
+
+ def get_min_max_transformers():
+     meta = importlib.metadata.metadata("optimum-intel")
+     requires = meta.get_all("Requires-Dist") or []
+     transformers_versions = [item for item in requires if "transformers" in item and "extra" not in item][0]
+     req = Requirement(transformers_versions)
+     maxver, minver = [ver.version for ver in list(req.specifier)]
+     return (minver, maxver)
+
+
+ def show_is_supported(model_id):
+     print(f"Checking {model_id}...")
+     minver, maxver = get_min_max_transformers()
+     versions = [minver, "4.53.0", maxver]
+
+     all_supported_models = set()
+     for v in versions:
+         archs = get_supported_models_for_version(v)
+         all_supported_models.update(archs)
+     try:
+         model_info = HfApi().model_info(model_id)
+     except RepositoryNotFoundError:
+         message = f"Model {model_id} was not found on the Hugging Face hub. Make sure you entered the correct model_id. If the model requires authentication, use `hf auth login` or a token to authenticate."
+     else:
+         if (model_info.config is not None) and model_info.config != {}:
+             model_type = model_info.config.get("model_type")
+             if model_type is None:  # Check for a diffusion class if model_type is not available
+                 class_name = model_info.config.get("diffusers", {}).get("_class_name")
+                 if class_name in SUPPORTED_DIFFUSION_CLASSES:
+                     message = (
+                         f"`{model_id}` with diffusion class `{class_name}` is **supported** by optimum-intel[openvino]."
+                     )
+                 else:
+                     message = f"`{model_id}` is not in the list of supported architectures by optimum-intel[openvino]. It is **likely not supported**, but it is wise to double-check."
+             elif model_type in all_supported_models:
+                 message = f"`{model_id}` with model type `{model_type}` is **supported** by optimum-intel[openvino]."
+             else:
+                 message = f"`{model_id}` with model type `{model_type}` is not in the list of supported architectures by optimum-intel[openvino]. It is **likely not supported**, but it is wise to double-check."
+         else:
+             message = f"`{model_id}` is **not supported** by optimum-intel[openvino]."
+     print(f"Using transformers: {versions}. Total number of supported architectures: {len(all_supported_models)}")
+     print(message)
+     return message
+
+
+ if __name__ == "__main__":
+     show_is_supported(sys.argv[1])
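
As an aside on `get_min_max_transformers`: it reads the `transformers` entry from optimum-intel's package metadata and extracts the two version bounds from its specifier. A minimal sketch with a made-up Requires-Dist string (the real bounds depend on the installed optimum-intel release):

```python
# Sketch of the specifier parsing above; "transformers<4.54.0,>=4.36.0" is a
# hypothetical Requires-Dist entry, not the actual pin.
from packaging.requirements import Requirement

req = Requirement("transformers<4.54.0,>=4.36.0")
bounds = sorted(spec.version for spec in req.specifier)
print(bounds)  # ['4.36.0', '4.54.0'] -> (minver, maxver)
```

Note that iteration order over a `SpecifierSet` is not guaranteed, so sorting as in this sketch is a more robust way to separate the lower and upper bounds than relying on position.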
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ --extra-index-url "https://download.pytorch.org/whl/cpu"
+ optimum-intel[tests,openvino,diffusers]@git+https://github.com/huggingface/optimum-intel
+ packaging
+ huggingface-hub
+ # bumped: 2025-12-03T11:57:13Z
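
Because optimum-intel is pinned to the repository's default branch rather than a released version, the daily `# bumped:` rewrite is what forces the Space to reinstall dependencies and pick up new commits. To see what actually got installed, the same `importlib.metadata` call used in app.py works locally (a minimal sketch):

```python
# Report the installed optimum-intel version, as resolved from the git pin.
import importlib.metadata

print(importlib.metadata.version("optimum-intel"))
```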
test_optimum_support.py ADDED
@@ -0,0 +1,31 @@
+ import pytest
+
+ from optimum_support import show_is_supported
+
+ # (model_id, expected_substring)
+ test_cases = [
+     ("openai/whisper-small", "is **supported**"),
+     ("Ultralytics/YOLO11", "is **not supported**"),
+     ("test/non-existing", "was not found on the Hugging Face hub"),
+     ("openai/gpt-oss-20b", "is **supported**"),
+     ("ibm-granite/granite-3.1-8b-instruct", "is **supported**"),
+     ("stabilityai/stable-diffusion-xl-base-1.0", "is **supported**"),
+     ("microsoft/Phi-4-multimodal-instruct", "is **supported**"),
+     ("google-bert/bert-base-uncased", "is **supported**"),
+     ("rednote-hilab/dots.ocr", "is not in the list of supported architectures"),
+     ("LiquidAI/LFM2-350M", "is **supported**"),
+     ("google/mobilenet_v2_1.0_224", "is **supported**"),
+     ("stabilityai/stable-diffusion-3.5-large", "is **supported**"),
+     ("stabilityai/sp4d", "is **not supported**"),
+     ("SimianLuo/LCM_Dreamshaper_v7", "is **supported**"),
+     ("stabilityai/sd-x2-latent-upscaler", "is not in the list"),
+     ("openbmb/MiniCPM3-4B", "is **supported**"),
+     ("Efficient-Large-Model/SANA-Video_2B_480p", "is **not supported**"),
+     ("optimum-intel-internal-testing/tiny-random-sana-sprint", "is **supported**"),
+ ]
+
+
+ @pytest.mark.parametrize("model_id,expected", test_cases)
+ def test_show_is_supported(model_id, expected):
+     result = show_is_supported(model_id)
+     assert expected in result, f"For {model_id}, expected '{expected}' in result, got: {result}"
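
To run a single case from this table without the full (network-heavy) suite, pytest's keyword filter can be invoked programmatically as well as from the CLI. A sketch, equivalent to `pytest test_optimum_support.py -k whisper`:

```python
# Run only the whisper-small case; the exit code mirrors pytest's result.
import pytest

raise SystemExit(pytest.main(["test_optimum_support.py", "-k", "whisper", "-q"]))
```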