"""
HuggingFace Spaces entry point for Visualisable.ai
This runs the unified backend service for production deployment
"""

import os
import sys
import asyncio
import logging
from pathlib import Path

# Add backend to path
sys.path.append(str(Path(__file__).parent / "backend"))

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Import the FastAPI app from model_service
from backend.model_service import app

# HuggingFace Spaces specific configuration
if os.getenv("SPACE_ID"):
    # Running on HuggingFace Spaces
    logger.info(f"Running on HuggingFace Spaces: {os.getenv('SPACE_ID')}")
    
    # Set cache directories for HuggingFace Spaces
    os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
    os.environ["HF_HOME"] = "/tmp/hf_home"
    os.environ["HUGGINGFACE_HUB_CACHE"] = "/tmp/huggingface_hub"
    
    # Set production environment variables
    os.environ["ENVIRONMENT"] = "production"
    os.environ["MODEL_DEVICE"] = os.getenv("MODEL_DEVICE", "cpu")  # Use CPU for HF Spaces free tier
    
    # Enable CORS for the Space URL
    space_host = os.getenv("SPACE_HOST", "")
    if space_host:
        from fastapi.middleware.cors import CORSMiddleware
        app.add_middleware(
            CORSMiddleware,
            allow_origins=[f"https://{space_host}", "http://localhost:3000"],
            allow_credentials=True,
            allow_methods=["*"],
            allow_headers=["*"],
        )

# Export the app for Gradio or direct serving; when executed directly,
# serve it ourselves with uvicorn.
if __name__ == "__main__":
    import uvicorn

    # HuggingFace Spaces routes external traffic to port 7860 by default;
    # honour an explicit PORT override if one is set.
    serve_port = int(os.getenv("PORT", "7860"))
    uvicorn.run(app, host="0.0.0.0", port=serve_port)