```python
import os
import shutil

import tensorflow as tf
from tensorflow.keras.models import load_model as keras_load_model
from huggingface_hub import snapshot_download

# Constants
REPO_ID = "can-org/AI-VS-HUMAN-IMAGE-classifier"
MODEL_DIR = "./IMG_models"
MODEL_PATH = os.path.join(MODEL_DIR, "latest-my_cnn_model.h5")  # adjust path as needed

_model_img = None  # global model variable


def warmup():
    """Download the model repository if needed and load the model into memory."""
    global _model_img
    if not os.path.exists(MODEL_DIR):
        download_model_Repo()
    _model_img = load_model()


def download_model_Repo():
    """Download the Hugging Face repo snapshot and copy it into MODEL_DIR."""
    if os.path.exists(MODEL_DIR):
        return
    snapshot_path = snapshot_download(repo_id=REPO_ID)
    os.makedirs(MODEL_DIR, exist_ok=True)
    shutil.copytree(snapshot_path, MODEL_DIR, dirs_exist_ok=True)


def load_model():
    """Load the Keras model, falling back to the CPU when no GPU is available."""
    if not os.path.exists(MODEL_DIR):
        download_model_Repo()
    # Check for GPU availability
    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        # GPU is available, load the model normally
        print("GPU detected, loading model on GPU.")
        model = keras_load_model(MODEL_PATH)
    else:
        # No GPU, pin model loading to the CPU
        print("No GPU detected, forcing model loading on CPU.")
        with tf.device('/CPU:0'):
            model = keras_load_model(MODEL_PATH)
    return model
```
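A minimal usage sketch of the module above. It assumes the file is saved as `model_loader.py`, that the classifier takes RGB images resized to 224×224 and scaled to [0, 1], and that it outputs a single score; the file name, input size, and preprocessing are assumptions, not details confirmed by the repository.

```python
# Hypothetical usage sketch; module name, input size, and preprocessing are assumptions.
import numpy as np
from PIL import Image

import model_loader  # the module above, assumed saved as model_loader.py

model_loader.warmup()  # downloads the repo on first run and loads the model


def predict(image_path: str) -> float:
    """Return the raw score the loaded model assigns to one image."""
    img = Image.open(image_path).convert("RGB").resize((224, 224))  # assumed input size
    batch = np.expand_dims(np.asarray(img, dtype="float32") / 255.0, axis=0)
    return float(model_loader._model_img.predict(batch)[0][0])


print(predict("example.jpg"))
```

Because `warmup()` rebinds the module-level `_model_img`, later reads of `model_loader._model_img` see the loaded model; a small accessor function could hide that global if preferred.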