Update app.py
app.py CHANGED
@@ -7,8 +7,6 @@ This follows the inference example in README.md and uses the
 
 from __future__ import annotations
 
-import asyncio
-import atexit
 import os
 import sys
 from typing import Optional, List, Tuple
@@ -39,36 +37,6 @@ _MODEL: Optional[AutoModelForCausalLM] = None
 _PROCESSOR: Optional[AutoProcessor] = None
 
 
-def _close_default_event_loop() -> None:
-    """Close the default asyncio loop to avoid fd errors on interpreter exit."""
-    try:
-        loop = asyncio.get_event_loop()
-    except RuntimeError:
-        return
-
-    if loop.is_closed():
-        return
-
-    # If the loop is still running, ask it to stop; closing while running raises.
-    if loop.is_running():
-        loop.stop()
-
-    try:
-        loop.run_until_complete(loop.shutdown_asyncgens())
-    except RuntimeError:
-        # If the loop was already running in another context, just try closing.
-        pass
-
-    try:
-        loop.close()
-    except Exception:
-        # Best-effort cleanup; swallow any finalizer noise.
-        pass
-
-
-atexit.register(_close_default_event_loop)
-
-
 def load_model_and_processor() -> Tuple[AutoModelForCausalLM, AutoProcessor]:
     """Lazy-load the QTSplus model and processor."""
     global _MODEL, _PROCESSOR
@@ -103,8 +71,8 @@ def load_model_and_processor() -> Tuple[AutoModelForCausalLM, AutoProcessor]:
     return _MODEL, _PROCESSOR
 
 
-# Preload model and processor at import time (for faster first inference).
-load_model_and_processor()
+# # Preload model and processor at import time (for faster first inference).
+# load_model_and_processor()
 
 
 def build_messages(video: Optional[str], prompt: str) -> List[dict]:
@@ -231,9 +199,4 @@ with gr.Blocks() as demo:
 
 
 if __name__ == "__main__":
-    try:
-        demo.queue().launch()
-    finally:
-        # Ensure Gradio and asyncio clean up cleanly to prevent fd errors.
-        demo.close()
-        _close_default_event_loop()
+    demo.queue().launch()