merge with hf

app.py CHANGED

@@ -443,7 +443,7 @@ with gr.Blocks(css=css, title="DKT", head=head_html) as demo:
 if __name__ == '__main__':

     #* main code, model and moge model initialization
-    demo.queue().launch(
+    demo.queue().launch()



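The only code change replaces an unterminated `demo.queue().launch(` (its arguments, if any, are not visible in this view) with a bare `demo.queue().launch()`. That is the usual pattern on Spaces, where the platform supplies server settings through environment variables that Gradio picks up on its own. A minimal sketch of the pattern, assuming standard Gradio; the Blocks layout below is a placeholder, not app.py's real UI:

import gradio as gr

# Placeholder layout; app.py's actual Blocks content is not shown in this diff.
with gr.Blocks(title="DKT") as demo:
    gr.Markdown("demo placeholder")

if __name__ == '__main__':
    # queue() enables request queuing, which long-running GPU jobs need on
    # ZeroGPU-backed Spaces; launch() with no arguments lets Gradio read
    # host/port from the environment that Spaces provides.
    demo.queue().launch()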
debug.py DELETED

@@ -1,77 +0,0 @@
-import torch
-import os
-from loguru import logger
-from moge.model.v2 import MoGeModel
-
-from tools.eval_utils import colorize_depth_map
-
-import cv2
-
-video_path = 'examples/1.mp4'
-frames = []
-
-cap = cv2.VideoCapture(video_path)
-while True:
-    ret, frame = cap.read()
-    if not ret:
-        break
-    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-    frames.append(frame_rgb)
-cap.release()
-
-print(f"Loaded {len(frames)} frames from {video_path}")
-
-
-
-# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# cached_model_path = 'checkpoints/moge_ckpt/moge-2-vitl-normal/model.pt'
-# if os.path.exists(cached_model_path):
-#     logger.info(f"Found cached model at {cached_model_path}, loading from cache...")
-#     moge_pipe = MoGeModel.from_pretrained(cached_model_path).to(device)
-# else:
-#     logger.info(f"Cache not found at {cached_model_path}, downloading from HuggingFace...")
-#     os.makedirs(os.path.dirname(cached_model_path), exist_ok=True)
-#     moge_pipe = MoGeModel.from_pretrained('Ruicheng/moge-2-vitl-normal', cache_dir=os.path.dirname(cached_model_path)).to(device)
-
-
-
-
-#* save small video
-
-# import numpy as np
-
-# # Save the first 5 frames as new mp4
-# out_path = "first5_output.mp4"
-# frame_subset = frames[:5]
-# target_width, target_height = 832, 480  # width, height as required
-# if len(frame_subset) > 0:
-#     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-#     out = cv2.VideoWriter(out_path, fourcc, 15, (target_width, target_height))
-#     for f in frame_subset:
-#         if f.dtype != np.uint8:
-#             f = np.clip(f, 0, 255).astype(np.uint8)
-#         # Resize the frame to 832x480 before writing
-#         resized_frame = cv2.resize(f, (target_width, target_height), interpolation=cv2.INTER_AREA)
-#         out.write(resized_frame)
-#     out.release()
-#     print(f"Saved first 5 frames to {out_path} (resized to {target_width}x{target_height})")
-# else:
-#     print("No frames to save.")
-
-# import torch
-
-# demo_input = frames[0]
-# demo_input = torch.tensor(demo_input / 255, dtype=torch.float32, device=device).permute(2, 0, 1)
-
-# print(demo_input.max(), demo_input.min(), demo_input.shape)
-# moge_prediction = moge_pipe.infer(demo_input)
-
-
-# moge_depth = moge_prediction['depth'].cpu().numpy()
-# logger.info(f'moge input shape: {demo_input.shape}, input mean: {demo_input.mean()}, std: {demo_input.std()}, moge_depth mean: {moge_depth.mean()}, min: {moge_depth.min()}, max: {moge_depth.max()}')
-# print(f'moge input shape: {demo_input.shape}, input mean: {demo_input.mean()}, std: {demo_input.std()}, moge_depth mean: {moge_depth.mean()}, min: {moge_depth.min()}, max: {moge_depth.max()}')
-
-# moge_depth = colorize_depth_map(moge_depth)
-
-# moge_depth.save('debug.jpg')
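The live portion of the deleted debug.py (everything above the commented-out blocks) is a standard OpenCV read-all-frames loop. Wrapped as a reusable function it looks like the sketch below; the helper name is illustrative, not something from the repository:

# Hypothetical helper distilled from the deleted debug.py.
import cv2

def load_video_frames(video_path):
    """Decode every frame of a video into a list of RGB uint8 arrays."""
    cap = cv2.VideoCapture(video_path)
    frames = []
    while True:
        ret, frame = cap.read()
        if not ret:  # cap.read() returns False once the stream is exhausted
            break
        # OpenCV decodes to BGR; models generally expect RGB.
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    cap.release()
    return frames

if __name__ == '__main__':
    frames = load_video_frames('examples/1.mp4')
    print(f"Loaded {len(frames)} frames")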
debug2.py DELETED

@@ -1,11 +0,0 @@
-
-
-from app import *
-
-video_file = 'examples/1.mp4'
-
-results = process_video(video_file, '1.3B', 1, 1)
-
-
-for k in results:
-    print(k)
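A note on the deleted smoke test: `from app import *` pulls every app.py global into the script's namespace, and any unguarded module-level code in app.py runs at import time. A lighter sketch that imports only the entry point, assuming `process_video` is importable from app.py as the deleted script implies (the meaning of its positional arguments is not documented here):

# Hypothetical replacement for the deleted debug2.py smoke test.
# Importing the single needed symbol avoids star-import namespace pollution;
# the arguments ('1.3B', 1, 1) are copied verbatim from the deleted script.
from app import process_video

if __name__ == '__main__':
    results = process_video('examples/1.mp4', '1.3B', 1, 1)
    for k in results:
        print(k)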