ishanprogs committed on
Commit 2d078c6 · verified · 1 Parent(s): f475ae3

Upload 5 files

Files changed (5)
  1. app.py +376 -0
  2. best.pt +3 -0
  3. clip_text_features.pt +3 -0
  4. partdetection yolobest.pt +3 -0
  5. requirements.txt +24 -0
app.py ADDED
@@ -0,0 +1,376 @@
import gradio as gr
import torch
import clip
from PIL import Image
import numpy as np
import os
import cv2
import gc  # Garbage collector
import logging
import random  # For annotator colors

# --- YOLOv8 Imports ---
from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator  # For drawing YOLO results

# --- Setup Logging ---
logging.getLogger("ultralytics").setLevel(logging.WARNING)  # Reduce YOLO logging noise
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# --- Constants ---
# Damage segmentation classes (order MUST match the training of 'best.pt')
DAMAGE_CLASSES = ['Cracked', 'Scratch', 'Flaking', 'Broken part', 'Corrosion', 'Dent', 'Paint chip', 'Missing part']
NUM_DAMAGE_CLASSES = len(DAMAGE_CLASSES)

# Part segmentation classes (order MUST match the training of 'partdetection yolobest.pt')
CAR_PART_CLASSES = [
    "Quarter-panel", "Front-wheel", "Back-window", "Trunk", "Front-door",
    "Rocker-panel", "Grille", "Windshield", "Front-window", "Back-door",
    "Headlight", "Back-wheel", "Back-windshield", "Hood", "Fender",
    "Tail-light", "License-plate", "Front-bumper", "Back-bumper", "Mirror",
    "Roof"
]
NUM_CAR_PART_CLASSES = len(CAR_PART_CLASSES)

# Paths within the Hugging Face Space repository
CLIP_TEXT_FEATURES_PATH = "./clip_text_features.pt"
DAMAGE_MODEL_WEIGHTS_PATH = "./best.pt"  # YOLOv8 damage model weights
# NOTE: the uploaded weights file is named with a space, not an underscore
PART_MODEL_WEIGHTS_PATH = "./partdetection yolobest.pt"  # YOLOv8 part model weights

# Prediction thresholds
DAMAGE_PRED_THRESHOLD = 0.4  # Confidence threshold for showing damage masks
PART_PRED_THRESHOLD = 0.3  # Confidence threshold for showing part masks
# --- Device Setup ---
if torch.cuda.is_available():
    DEVICE = "cuda"
    logger.info("CUDA available, using GPU.")
else:
    DEVICE = "cpu"
    logger.info("CUDA not available, using CPU.")

# --- MODEL LOADING (load models globally ONCE on startup) ---
print("Loading models...")
clip_model = None
clip_preprocess = None
clip_text_features = None
damage_model = None
part_model = None

# --- Load CLIP Model (Model 1) ---
try:
    logger.info("Loading CLIP model...")
    clip_model, clip_preprocess = clip.load("ViT-B/16", device=DEVICE)
    clip_model.eval()
    logger.info("CLIP model loaded.")

    logger.info(f"Loading CLIP text features from {CLIP_TEXT_FEATURES_PATH}...")
    if not os.path.exists(CLIP_TEXT_FEATURES_PATH):
        raise FileNotFoundError(f"CLIP text features not found: {CLIP_TEXT_FEATURES_PATH}.")
    clip_text_features = torch.load(CLIP_TEXT_FEATURES_PATH, map_location=DEVICE)
    logger.info("CLIP text features loaded.")
except Exception as e:
    logger.error(f"Error loading CLIP model or features: {e}", exc_info=True)
    # Allow the app to continue; downstream functions check for None

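# Illustrative sketch (an assumption, not part of this commit): clip_text_features.pt
# was presumably generated offline along these lines, with the prompt order matching
# the "Car" / "Not Car" indexing used in classify_image_clip() below:
#
#     prompts = ["a photo of a car", "a photo of something that is not a car"]  # hypothetical prompts
#     with torch.no_grad():
#         text_features = clip_model.encode_text(clip.tokenize(prompts).to(DEVICE))
#         text_features /= text_features.norm(dim=-1, keepdim=True)
#     torch.save(text_features, "clip_text_features.pt")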
# --- Load Damage Segmentation Model (Model 2 - YOLOv8) ---
try:
    logger.info(f"Loading Damage Segmentation (YOLOv8) model from {DAMAGE_MODEL_WEIGHTS_PATH}...")
    if not os.path.exists(DAMAGE_MODEL_WEIGHTS_PATH):
        raise FileNotFoundError(f"Damage model weights not found: {DAMAGE_MODEL_WEIGHTS_PATH}.")
    damage_model = YOLO(DAMAGE_MODEL_WEIGHTS_PATH)
    damage_model.to(DEVICE)  # Ensure the model is on the correct device
    # Verify that the class names baked into the weights match our list
    loaded_damage_names = list(damage_model.names.values())
    if loaded_damage_names != DAMAGE_CLASSES:
        logger.warning(f"Mismatch between defined DAMAGE_CLASSES and names in {DAMAGE_MODEL_WEIGHTS_PATH}")
        logger.warning(f"  Model names: {loaded_damage_names}")
        DAMAGE_CLASSES = loaded_damage_names  # Trust the names stored in the model file
        logger.warning(f"  Updated DAMAGE_CLASSES to: {DAMAGE_CLASSES}")
    logger.info("Damage Segmentation (YOLOv8) model loaded.")
except Exception as e:
    logger.error(f"Error loading Damage Segmentation (YOLOv8) model: {e}", exc_info=True)
    damage_model = None

# --- Load Part Segmentation Model (Model 3 - YOLOv8) ---
try:
    logger.info(f"Loading Part Segmentation (YOLOv8) model from {PART_MODEL_WEIGHTS_PATH}...")
    if not os.path.exists(PART_MODEL_WEIGHTS_PATH):
        raise FileNotFoundError(f"Part model weights not found: {PART_MODEL_WEIGHTS_PATH}.")
    part_model = YOLO(PART_MODEL_WEIGHTS_PATH)
    part_model.to(DEVICE)  # Ensure the model is on the correct device
    # Verify that the class names baked into the weights match our list
    loaded_part_names = list(part_model.names.values())
    if loaded_part_names != CAR_PART_CLASSES:
        logger.warning(f"Mismatch between defined CAR_PART_CLASSES and names in {PART_MODEL_WEIGHTS_PATH}")
        logger.warning(f"  Model names: {loaded_part_names}")
        CAR_PART_CLASSES = loaded_part_names  # Trust the names stored in the model file
        logger.warning(f"  Updated CAR_PART_CLASSES to: {CAR_PART_CLASSES}")
    logger.info("Part Segmentation (YOLOv8) model loaded.")
except Exception as e:
    logger.error(f"Error loading Part Segmentation (YOLOv8) model: {e}", exc_info=True)
    part_model = None

print("Model loading complete.")
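
# For reference: ultralytics stores class names as an id -> name dict
# (e.g. {0: 'Cracked', 1: 'Scratch', ...}), so list(model.names.values())
# recovers the ordered class list that the checks above compare against.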

# --- Prediction Functions ---

def classify_image_clip(image_pil):
    """Classifies the image with CLIP. Returns a label and class probabilities."""
    if clip_model is None or clip_text_features is None:
        return "Error: CLIP Model Not Loaded", {"Error": 1.0}
    try:
        # Ensure the image is an RGB PIL image
        if image_pil.mode != "RGB":
            image_pil = image_pil.convert("RGB")
        image_input = clip_preprocess(image_pil).unsqueeze(0).to(DEVICE)
        with torch.no_grad():
            image_features = clip_model.encode_image(image_input)
            image_features /= image_features.norm(dim=-1, keepdim=True)
            logit_scale = clip_model.logit_scale.exp()
            similarity = (image_features @ clip_text_features.T) * logit_scale
            probs = similarity.softmax(dim=-1).squeeze().cpu()
        predicted_label = "Car" if probs[0] > probs[1] else "Not Car"
        # gr.Label expects float confidences rather than formatted strings
        prob_dict = {"Car": round(probs[0].item(), 3), "Not Car": round(probs[1].item(), 3)}
        return predicted_label, prob_dict
    except Exception as e:
        logger.error(f"Error during CLIP prediction: {e}", exc_info=True)
        return "Error during CLIP processing", {"Error": 1.0}
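
# Worked example with made-up numbers: if the normalized image feature has cosine
# similarity 0.31 with the "car" text feature and 0.24 with the "not car" one, and
# logit_scale.exp() is 100, the logits are [31.0, 24.0]; softmax gives roughly
# [0.999, 0.001], so predicted_label is "Car".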

# --- Combined Processing and Overlap Logic ---

def process_car_image(image_np_bgr):
    """
    Runs damage and part segmentation (both YOLOv8), calculates overlap, and returns results.

    Returns:
        combined_image_rgb: Image with both part and damage masks drawn.
        assignment_text: String describing the damage-part assignments.
    """
    if damage_model is None:
        logger.error("Damage YOLOv8 model not available.")
        return cv2.cvtColor(image_np_bgr, cv2.COLOR_BGR2RGB), "Error: Damage model not loaded."
    if part_model is None:
        logger.error("Part YOLOv8 model not available.")
        return cv2.cvtColor(image_np_bgr, cv2.COLOR_BGR2RGB), "Error: Part model not loaded."

    final_assignments = []
    # Draw on a BGR copy; convert to RGB only for the final display
    annotated_image_bgr = image_np_bgr.copy()
    img_h, img_w = image_np_bgr.shape[:2]

    try:
        # --- 1. Predict Damages (YOLOv8) ---
        logger.info("Running Damage Segmentation (YOLOv8)...")
        # Apply the confidence threshold here
        damage_results = damage_model.predict(image_np_bgr, verbose=False, device=DEVICE, conf=DAMAGE_PRED_THRESHOLD)
        damage_result = damage_results[0]  # Result for the first (only) image
        logger.info(f"Found {len(damage_result.boxes)} potential damages.")

        damage_masks_np = damage_result.masks.data.cpu().numpy().astype(bool) if damage_result.masks is not None else np.array([])
        damage_classes_ids = damage_result.boxes.cls.cpu().numpy().astype(int) if damage_result.boxes is not None else np.array([])
        damage_boxes = damage_result.boxes.xyxy.cpu().numpy() if damage_result.boxes is not None else np.array([])  # For drawing

        # --- 2. Predict Parts (YOLOv8) ---
        logger.info("Running Part Segmentation (YOLOv8)...")
        part_results = part_model.predict(image_np_bgr, verbose=False, device=DEVICE, conf=PART_PRED_THRESHOLD)
        part_result = part_results[0]  # Result for the first (only) image
        logger.info(f"Found {len(part_result.boxes)} potential parts.")

        part_masks_np = part_result.masks.data.cpu().numpy().astype(bool) if part_result.masks is not None else np.array([])  # [N_part, H, W]
        part_classes_ids = part_result.boxes.cls.cpu().numpy().astype(int) if part_result.boxes is not None else np.array([])
        part_boxes = part_result.boxes.xyxy.cpu().numpy() if part_result.boxes is not None else np.array([])  # For drawing

        # --- 3. Resize Masks if Necessary ---
        # YOLO segmentation masks can come back at the model's inference
        # resolution rather than the original image size, so resize them
        def resize_masks(masks_np, target_h, target_w):
            if masks_np.shape[0] == 0 or (masks_np.shape[1] == target_h and masks_np.shape[2] == target_w):
                return masks_np  # Return if empty or already the correct size
            logger.info(f"Resizing {masks_np.shape[0]} masks from {masks_np.shape[1:]} to {(target_h, target_w)}")
            resized_masks = []
            for mask in masks_np:
                # INTER_NEAREST keeps the masks binary (no interpolated in-between values)
                mask_resized = cv2.resize(mask.astype(np.uint8), (target_w, target_h), interpolation=cv2.INTER_NEAREST)
                resized_masks.append(mask_resized.astype(bool))
            return np.array(resized_masks)

        damage_masks_np = resize_masks(damage_masks_np, img_h, img_w)
        part_masks_np = resize_masks(part_masks_np, img_h, img_w)

        # --- 4. Calculate Overlap ---
        logger.info("Calculating overlap...")
        if damage_masks_np.shape[0] > 0 and part_masks_np.shape[0] > 0:
            overlap_threshold = 0.4  # Minimum fraction of the damage mask that must lie on a part

            for i in range(len(damage_masks_np)):  # Iterate over each detected damage
                damage_mask = damage_masks_np[i]
                damage_class_id = damage_classes_ids[i]
                try:
                    damage_name = DAMAGE_CLASSES[damage_class_id]
                except IndexError:
                    continue  # Skip invalid class IDs

                damage_area = np.sum(damage_mask)
                if damage_area < 10:
                    continue  # Skip tiny damage masks

                max_overlap = 0
                assigned_part_name = "Unknown / Outside Parts"

                for j in range(len(part_masks_np)):  # Iterate over each detected part
                    part_mask = part_masks_np[j]
                    part_class_id = part_classes_ids[j]
                    try:
                        part_name = CAR_PART_CLASSES[part_class_id]
                    except IndexError:
                        continue  # Skip invalid class IDs

                    intersection = np.logical_and(damage_mask, part_mask)
                    intersection_area = np.sum(intersection)
                    overlap_ratio = intersection_area / damage_area if damage_area > 0 else 0

                    if overlap_ratio > max_overlap:
                        max_overlap = overlap_ratio
                        if max_overlap >= overlap_threshold:
                            assigned_part_name = part_name

                assignment_desc = f"{damage_name} in {assigned_part_name}"
                if assigned_part_name == "Unknown / Outside Parts":
                    assignment_desc += f" (Overlap < {overlap_threshold*100:.0f}%)"
                final_assignments.append(assignment_desc)
                logger.info(f"Overlap result: {assignment_desc}")

        elif damage_masks_np.shape[0] > 0:
            final_assignments.append(f"{len(damage_masks_np)} damages found, but no parts detected/matched.")
        elif part_masks_np.shape[0] > 0:
            final_assignments.append("No damages detected (above threshold).")
        else:
            final_assignments.append("No damages or parts detected.")
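
        # Worked example with made-up numbers: a "Dent" mask of 500 px whose
        # intersection with the "Front-door" mask is 350 px gives
        # overlap_ratio = 350 / 500 = 0.7 >= 0.4, so the result is "Dent in Front-door".
        # Note the ratio is intersection / damage_area (how much of the damage lies on
        # the part), not IoU, so a small damage fully inside a large part scores 1.0.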

        # --- 5. Visualization using the YOLO Annotator ---
        logger.info("Visualizing results...")
        # Create the annotator ONCE on the BGR image copy
        annotator = Annotator(annotated_image_bgr, line_width=2, example=part_model.names)  # Use part model names

        # Draw PART masks first (green boxes, light semi-transparent masks)
        if part_result.masks is not None:
            colors_part = [(0, random.randint(100, 200), 0) for _ in part_classes_ids]  # Shades of green
            annotator.masks(part_result.masks.data, colors=colors_part, alpha=0.3)
            for box, cls_id in zip(part_boxes, part_classes_ids):
                try:
                    label = f"{CAR_PART_CLASSES[cls_id]}"
                    annotator.box_label(box, label=label, color=(0, 200, 0))  # Darker green box/text
                except IndexError:
                    continue

        # Draw DAMAGE masks second (red boxes, slightly more opaque masks)
        if damage_result.masks is not None:
            colors_dmg = [(random.randint(100, 200), 0, 0) for _ in damage_classes_ids]  # Shades of red
            annotator.masks(damage_result.masks.data, colors=colors_dmg, alpha=0.4)
            for box, cls_id in zip(damage_boxes, damage_classes_ids):
                try:
                    label = f"{DAMAGE_CLASSES[cls_id]}"
                    annotator.box_label(box, label=label, color=(200, 0, 0))  # Darker red box/text
                except IndexError:
                    continue

        # Get the final annotated image (still BGR)
        annotated_image_bgr = annotator.result()
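
        # Caveat: the canvas is a BGR array, and OpenCV-based drawing treats a color
        # tuple such as (200, 0, 0) as BGR (i.e., blue). If the damage overlays render
        # blue instead of red, swapping the damage colors to channel order (0, 0, R),
        # e.g. (0, 0, 200), should fix it; the green part colors are unaffected.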

    except Exception as e:
        logger.error(f"Error during combined processing: {e}", exc_info=True)
        final_assignments.append("Error during processing.")
        # Fall through: annotated_image_bgr keeps whatever was drawn before the error

    # --- Prepare output ---
    assignment_text = "\n".join(final_assignments) if final_assignments else "No specific damage assignments."
    # Convert the final annotated image to RGB for the Gradio display
    final_output_image_rgb = cv2.cvtColor(annotated_image_bgr, cv2.COLOR_BGR2RGB)

    return final_output_image_rgb, assignment_text


# --- Main Gradio Function ---
def predict_pipeline(image_np_input):
    """
    Main pipeline: Classify -> Segment -> Assign -> Visualize
    """
    if image_np_input is None:
        return "Please upload an image.", {}, None, "N/A"

    logger.info("Received image for processing...")
    # gr.Image(type="numpy") delivers an RGB array, while the rest of the pipeline
    # (OpenCV drawing and the YOLO models) assumes BGR, so convert once up front.
    image_np_rgb = image_np_input
    image_np_bgr = cv2.cvtColor(image_np_input, cv2.COLOR_RGB2BGR)

    final_output_image = None
    assignment_text = "Processing..."
    classification_result = "Error"
    probabilities = {}

    # --- Stage 1: CLIP Classification ---
    try:
        image_pil = Image.fromarray(image_np_rgb)
        classification_result, probabilities = classify_image_clip(image_pil)
        logger.info(f"CLIP Result: {classification_result}, Probs: {probabilities}")
    except Exception as e:
        logger.error(f"Error in CLIP stage: {e}", exc_info=True)
        assignment_text = "Error during classification."
        final_output_image = image_np_rgb  # Show the original image on classification error

    # --- Stages 2 & 3: Segmentation and Assignment (only if classified as 'Car') ---
    if classification_result == "Car":
        logger.info("Image classified as Car. Running segmentation and assignment...")
        try:
            # Pass the BGR numpy array
            final_output_image, assignment_text = process_car_image(image_np_bgr)
        except Exception as e:
            logger.error(f"Error in segmentation/assignment stage: {e}", exc_info=True)
            assignment_text = "Error during segmentation/assignment."
            final_output_image = image_np_rgb  # Show the original image on processing error
    elif classification_result == "Not Car":
        logger.info("Image classified as Not Car.")
        final_output_image = image_np_rgb  # Show the original
        assignment_text = "Image classified as Not Car."
    elif final_output_image is None:  # CLIP error case: make sure an image is still returned
        final_output_image = image_np_rgb

    # --- Cleanup ---
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    # Return all results
    return classification_result, probabilities, final_output_image, assignment_text
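
# Quick smoke test without the UI (illustrative; assumes a local 'sample.jpg'):
#
#     test_img = cv2.cvtColor(cv2.imread("sample.jpg"), cv2.COLOR_BGR2RGB)  # mimic Gradio's RGB input
#     label, probs, vis, assignments = predict_pipeline(test_img)
#     print(label, probs, assignments)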


# --- Gradio Interface ---
logger.info("Setting up Gradio interface...")

title = "🚗 Car Damage Analysis Pipeline (YOLOv8)"
description = """
1. **Upload** an image of a vehicle.
2. **Classification:** Determines whether the image contains a car (using CLIP).
3. **Segmentation:** If it is a car, detects car parts and damages (YOLOv8 for both).
4. **Assignment:** Assigns each detected damage to a car part based on mask overlap.
5. **Output:** Shows the image with overlaid masks (green = part, red = damage) and lists the damage assignments.
"""
examples = []  # Add example image paths here if any are uploaded

# Define inputs and outputs (labels numbered in display order)
input_image = gr.Image(type="numpy", label="Upload Car Image")
output_classification = gr.Textbox(label="1. Classification Result")
output_probabilities = gr.Label(label="2. Classification Probabilities")
output_image_display = gr.Image(type="numpy", label="3. Segmentation Visualization")
output_assignment = gr.Textbox(label="4. Damage Assignments", lines=5, interactive=False)

# Build the interface
iface = gr.Interface(
    fn=predict_pipeline,
    inputs=input_image,
    outputs=[output_classification, output_probabilities, output_image_display, output_assignment],
    title=title,
    description=description,
    examples=examples,
    allow_flagging="never"
)

if __name__ == "__main__":
    logger.info("Launching Gradio app...")
    iface.launch()
best.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ae345bfb159676f6343daf72c1912bb374fa4997e6788e84d930b9bb28751d27
size 92296829
clip_text_features.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:28315215c9429a04e5aafd99cf8a0292a489bf2937d44d580a3cf1c78ee84f94
size 3283
partdetection yolobest.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c49c34a128d155b61c8990218d24d0f17ef979c56eaf63c497b547523f39d64
size 92324733
requirements.txt ADDED
@@ -0,0 +1,24 @@
# 1. Install a specific PyTorch/CUDA build first
# Update the CUDA version if necessary (e.g., cu117, cu121)
--extra-index-url https://download.pytorch.org/whl/cu118
torch>=2.0.0  # YOLOv8 generally needs a recent torch
torchvision>=0.15.0
torchaudio>=2.0.0

# 2. YOLOv8 and its dependencies
ultralytics

# 3. Other libraries used by app.py (ftfy/regex/tqdm are CLIP dependencies)
gradio
opencv-python-headless
matplotlib
pyyaml>=5.1  # Still needed by YOLO/others
termcolor
cloudpickle
submitit
ftfy
regex
tqdm

# 4. Install CLIP from source
git+https://github.com/openai/CLIP.git
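
# Locally, the equivalent of the Space's automatic build step is:
#     pip install -r requirements.txt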