thanhkt committed on
Commit c640bc5 · 1 Parent(s): 9925113

Update gradio_app.py

Files changed (1)
  1. gradio_app.py +45 -56
gradio_app.py CHANGED
@@ -15,7 +15,6 @@ from mllm_tools.litellm import LiteLLMWrapper
 from src.config.config import Config
 from generate_video import EnhancedVideoGenerator, VideoGenerationConfig, allowed_models
 from provider import provider_manager
-from llm_config import LLMConfigurationFacade, LLMConfiguration
 
 # Configure logging
 logging.basicConfig(
@@ -34,17 +33,8 @@ os.makedirs("thumbnails", exist_ok=True)
 # Global dictionary to track job status
 job_status = {}
 
-# Initialize the new LLM configuration system
-llm_config_facade = LLMConfigurationFacade()
-
-# Model descriptions for better user understanding
-MODEL_DESCRIPTIONS = {
-    "openai/gpt-4o": "🚀 Latest OpenAI model with enhanced capabilities"
-}
-
-# Set default model and provider
-DEFAULT_MODEL = "openai/gpt-4o"
-DEFAULT_PROVIDER = "openai"
+# Default model setting - simplified to use GPT-4o-mini for all operations
+DEFAULT_MODEL = "openai/gpt-4o-mini"
 
 def cancel_job(job_id):
     """Cancel a running job."""
@@ -96,8 +86,8 @@ def get_job_statistics():
 
 def init_video_generator(params):
     """Initialize the EnhancedVideoGenerator with the given parameters."""
-    model_name = params.get('model', 'gemini/gemini-2.5-flash-preview-04-17')
-    helper_model_name = params.get('helper_model', model_name)
+    model_name = params.get('model', DEFAULT_MODEL)
+    helper_model_name = params.get('helper_model', DEFAULT_MODEL)
     verbose = params.get('verbose', True)  # Set verbose to True by default for better debugging
     max_scene_concurrency = params.get('max_scene_concurrency', 1)
 
@@ -303,7 +293,7 @@ def start_async_job(job_id, params):
     thread.start()
     return thread
 
-def submit_job(topic, description, model, helper_model, max_retries, use_rag, use_visual_fix_code, temperature, use_context_learning, verbose, max_scene_concurrency):
+def submit_job(topic, description, max_retries, use_rag, use_visual_fix_code, temperature, use_context_learning, verbose, max_scene_concurrency, api_key):
     """Submit a new video generation job."""
     # Input validation
     if not topic.strip():
@@ -318,13 +308,13 @@ def submit_job(topic, description, model, helper_model, max_retries, use_rag, us
     if len(description.strip()) < 10:
         return "❌ Error: Description must be at least 10 characters long", None, gr.update(visible=False)
 
-    # Validate LLM configuration
-    current_config = llm_config_facade.load_configuration()
-    if not current_config or not current_config.api_key:
-        return "❌ Error: Please configure and validate your API key first", None, gr.update(visible=False)
+    # Set default model
+    model = DEFAULT_MODEL
+    helper_model = DEFAULT_MODEL
 
-    if not model:
-        return "❌ Error: Please select a model", None, gr.update(visible=False)
+    # Validate API key
+    if not api_key or not api_key.strip():
+        return "❌ Error: Please enter your OpenAI API key", None, gr.update(visible=False)
 
     try:
         # Generate job ID
@@ -342,7 +332,7 @@ def submit_job(topic, description, model, helper_model, max_retries, use_rag, us
             'message': 'Job submitted, waiting to start...'
         }
 
-        # Prepare parameters with validated configuration
+        # Prepare parameters with default configuration
        params = {
            'topic': topic,
            'description': description,
@@ -356,9 +346,9 @@ def submit_job(topic, description, model, helper_model, max_retries, use_rag, us
            'verbose': verbose,
            'max_scene_concurrency': max_scene_concurrency,
            'output_dir': Config.OUTPUT_DIR,
-           # Add validated configuration
-           'provider': current_config.provider,
-           'api_key': current_config.api_key
+           # Use OpenAI as default provider
+           'provider': 'openai',
+           'api_key': api_key.strip()
        }
 
        # Start job asynchronously
@@ -551,27 +541,22 @@ with gr.Blocks(
            )
        with gr.Column(scale=1):
            with gr.Group():
-                gr.Markdown("### 🌐 LLM Configuration")
-                # Remove provider selection, always use GPT-4o
-                model_input = gr.Dropdown(
-                    label="🤖 AI Model",
-                    choices=[DEFAULT_MODEL],
-                    value=DEFAULT_MODEL,
-                    interactive=False,
-                    info="Model is fixed to GPT-4o for all generations"
-                )
-                model_description = gr.Markdown(
-                    value=MODEL_DESCRIPTIONS[DEFAULT_MODEL],
-                    visible=True,
-                    elem_classes=["model-description"]
-                )
-                helper_model_input = gr.Dropdown(
-                    label="🔧 Helper Model",
-                    choices=[DEFAULT_MODEL],
-                    value=DEFAULT_MODEL,
-                    interactive=False,
-                    info="Helper model is fixed to GPT-4o"
+                gr.Markdown("### 🔑 API Configuration")
+
+                # Simple API key input
+                api_key_input = gr.Textbox(
+                    label="🔑 OpenAI API Key",
+                    placeholder="Enter your OpenAI API key",
+                    type="password",
+                    value=os.getenv('OPENAI_API_KEY', ''),
+                    interactive=True,
+                    info="Your OpenAI API key for GPT-4o-mini access"
                )
+
+                # Display current model setting
+                gr.Markdown(f"**🤖 AI Model:** Using {DEFAULT_MODEL} for all operations")
+
+                # Temperature control
                temperature_input = gr.Slider(
                    label="🌡️ Creativity (Temperature)",
                    minimum=0.0,
@@ -768,11 +753,9 @@
 - Examples or applications to demonstrate
 - Preferred video length or depth
 
-### 🤖 Step 2: Model Selection
-- **Gemini 1.5 Pro**: Best for complex mathematical reasoning
-- **Gemini 2.0 Flash**: Fastest processing, good for simple topics
-- **GPT-4**: Reliable and consistent output
-- **Claude**: Excellent for detailed explanations
+### 🔑 Step 2: API Configuration
+- **OpenAI API Key**: Enter your OpenAI API key for access
+- **AI Model**: Automatically uses GPT-4o-mini for optimal cost and performance
 
 ### ⚙️ Step 3: Advanced Settings
 - **Temperature**: 0.3-0.5 for factual content, 0.7-0.9 for creative explanations
@@ -806,10 +789,7 @@
 
    # Event handlers with improved functionality
    def clear_form():
-        return ("", "", 0.7, False, True, False, True, 1, 1, "Form cleared! Ready for new input.")
-
-    def update_model_description(model):
-        return MODEL_DESCRIPTIONS.get(model, "No description available")
+        return ("", "", 0.7, False, True, False, True, 1, 1, os.getenv('OPENAI_API_KEY', ''), "Form cleared! Ready for new input.")
 
    def update_stats():
        stats = get_job_statistics()
@@ -830,12 +810,21 @@
        job_status.clear()
        return f"Cleared all {count} jobs"
 
+    # Connect simplified event handlers - no model selection needed
+
+    clear_form_btn.click(
+        fn=clear_form,
+        outputs=[topic_input, description_input, temperature_input,
+                 use_rag_input, use_visual_fix_code_input, use_context_learning_input,
+                 verbose_input, max_retries_input, max_scene_concurrency_input, api_key_input, result_text]
+    )
+
    submit_btn.click(
        fn=submit_job,
        inputs=[
-            topic_input, description_input, model_input, helper_model_input, max_retries_input,
+            topic_input, description_input, max_retries_input,
            use_rag_input, use_visual_fix_code_input, temperature_input, use_context_learning_input,
-            verbose_input, max_scene_concurrency_input
+            verbose_input, max_scene_concurrency_input, api_key_input
        ],
        outputs=[result_text, job_id_output, status_container]
    ).then(
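
For reference, a minimal sketch (not part of the commit) of how the reworked submit_job could be exercised outside the Gradio UI. It assumes gradio_app.py is importable and that OPENAI_API_KEY is set; the keyword arguments mirror the new signature and the updated inputs=[...] wiring above, the default values match what clear_form resets the form to, and the topic/description strings are purely illustrative.

# Sketch only: exercises the new submit_job signature introduced in this commit.
# Assumes gradio_app.py is on the import path and OPENAI_API_KEY is set.
import os
from gradio_app import submit_job

message, job_id, status_update = submit_job(
    topic="Pythagorean theorem",
    description="A short animated proof with a worked example on a 3-4-5 triangle.",
    max_retries=1,
    use_rag=False,
    use_visual_fix_code=True,
    temperature=0.7,
    use_context_learning=False,
    verbose=True,
    max_scene_concurrency=1,
    api_key=os.getenv("OPENAI_API_KEY", ""),
)
print(message, job_id)  # per the diff, an error string and None are returned if the API key is missing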