igor04091968 committed on
Commit 9424529 · 1 Parent(s): 4300e31

Test: Simplify app.py to test logging

Files changed (1)
  1. app.py +1 -658
app.py CHANGED
@@ -1,658 +1 @@
- import random
- from collections.abc import Mapping
- from uuid import uuid4
-
- from openai import OpenAI
- import gradio as gr
- import base64
- import mimetypes
- import copy
- import os
-
- # Workaround for PyCharm debugger + uvicorn compatibility error:
- # TypeError: _patch_asyncio.<locals>.run() got an unexpected keyword argument 'loop_factory'
- DEBUG = False
- if DEBUG is True: # or sys.gettrace() is not None: # Debugger is attached
-     import asyncio
-     _original_run = asyncio.run
-     def _patched_run(main, **kwargs):
-         kwargs.pop('loop_factory', None) # Remove unsupported arg
-         return _original_run(main, **kwargs)
-     asyncio.run = _patched_run
-
- from theme import apriel
- from utils import COMMUNITY_POSTFIX_URL, get_model_config, check_format, models_config, \
-     logged_event_handler, DEBUG_MODE, DEBUG_MODEL, log_debug, log_info, log_error, log_warning
- from log_chat import log_chat
-
- DEFAULT_MODEL_TEMPERATURE = 0.6
- BUTTON_WIDTH = 160
- DEFAULT_OPT_OUT_VALUE = DEBUG_MODE
-
- # If DEBUG_MODEL is True, use an alternative model (without reasoning) for testing
- DEFAULT_MODEL_NAME = "Apriel-1.5-15B-thinker" if not DEBUG_MODEL else "Apriel-1.5-15B-thinker" # "Apriel-5b"
-
- BUTTON_ENABLED = gr.update(interactive=True)
- BUTTON_DISABLED = gr.update(interactive=False)
- INPUT_ENABLED = gr.update(interactive=True)
- INPUT_DISABLED = gr.update(interactive=False)
- DROPDOWN_ENABLED = gr.update(interactive=True)
- DROPDOWN_DISABLED = gr.update(interactive=False)
-
- SEND_BUTTON_ENABLED = gr.update(interactive=True, visible=True)
- SEND_BUTTON_DISABLED = gr.update(interactive=True, visible=False)
- STOP_BUTTON_ENABLED = gr.update(interactive=True, visible=True)
- STOP_BUTTON_DISABLED = gr.update(interactive=True, visible=False)
-
- chat_start_count = 0
- model_config = {}
- openai_client = None
-
- USE_RANDOM_ENDPOINT = False
- endpoint_rotation_count = 0
-
- # Maximum number of image messages allowed per request
- MAX_IMAGE_MESSAGES = 5
-
-
- def app_loaded(state, request: gr.Request):
-     log_info("Entering app_loaded")
-     try:
-         message_html = setup_model(DEFAULT_MODEL_NAME, intial=False)
-         state['session'] = request.session_hash if request else uuid4().hex
-         log_debug(f"app_loaded() --> Session: {state['session']}")
-         log_info("Exiting app_loaded successfully")
-         return state, message_html
-     except Exception as e:
-         log_error(f"Error in app_loaded: {e}")
-         raise
-
- def update_model_and_clear_chat(model_name):
-     actual_model_name = model_name.replace("Model: ", "")
-     desc = setup_model(actual_model_name)
-     return desc, []
-
-
- def setup_model(model_key, intial=False):
-     log_info(f"Entering setup_model with model_key: {model_key}")
-     global model_config, openai_client, endpoint_rotation_count
-     model_config = get_model_config(model_key)
-     log_debug(f"update_model() --> Model config: {model_config}")
-
-     url_list = (model_config.get('VLLM_API_URL_LIST') or "").split(",")
-     if USE_RANDOM_ENDPOINT:
-         base_url = random.choice(url_list) if len(url_list) > 0 else model_config.get('VLLM_API_URL')
-     else:
-         base_url = url_list[endpoint_rotation_count % len(url_list)]
-         endpoint_rotation_count += 1
-
-     openai_client = OpenAI(
-         api_key=model_config.get('AUTH_TOKEN'),
-         base_url=base_url
-     )
-     model_config['base_url'] = base_url
-     log_debug(f"Switched to model {model_key} using endpoint {base_url}")
-
-     _model_hf_name = model_config.get("MODEL_HF_URL").split('https://huggingface.co/')[1]
-     _link = f"<a href='{model_config.get('MODEL_HF_URL')}{COMMUNITY_POSTFIX_URL}' target='_blank'>{_model_hf_name}</a>"
-     _description = f"We'd love to hear your thoughts on the model. Click here to provide feedback - {_link}"
-
-     if intial:
-         return
-     else:
-         return _description
-
-
- def chat_started():
-     # outputs: model_dropdown, user_input, send_btn, stop_btn, clear_btn
-     return (DROPDOWN_DISABLED, gr.update(value="", interactive=False),
-             SEND_BUTTON_DISABLED, STOP_BUTTON_ENABLED, BUTTON_DISABLED)
-
-
- def chat_finished():
-     # outputs: model_dropdown, user_input, send_btn, stop_btn, clear_btn
-     return DROPDOWN_ENABLED, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED
-
-
- def stop_chat(state):
-     state["stop_flag"] = True
-     gr.Info("Chat stopped")
-     return state
-
-
- def toggle_opt_out(state, checkbox):
-     state["opt_out"] = checkbox
-     return state
-
-
- def run_chat_inference(history, message, state):
-     global chat_start_count
-     state["is_streaming"] = True
-     state["stop_flag"] = False
-     error = None
-     model_name = model_config.get('MODEL_NAME')
-     temperature = model_config.get('TEMPERATURE', DEFAULT_MODEL_TEMPERATURE)
-
-     # Reinitialize the OpenAI client with a random endpoint from the list
-     setup_model(model_config.get('MODEL_KEY'))
-     log_info(f"Using model {model_name} (temperature: {temperature}) with endpoint {model_config.get('base_url')}")
-
-     if len(history) == 0:
-         state["chat_id"] = uuid4().hex
-
-     if openai_client is None:
-         log_info("Client UI is stale, letting user know to refresh the page")
-         gr.Warning("Client UI is stale, please refresh the page")
-         return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-
-     # files will be the newly added files from the user
-     files = []
-
-     # outputs: model_dropdown, user_input, send_btn, stop_btn, clear_btn, session_state
-     log_debug(f"{'-' * 80}")
-     log_debug(f"chat_fn() --> Message: {message}")
-     log_debug(f"chat_fn() --> History: {history}")
-
-     # We have multimodal input in this case
-     if isinstance(message, Mapping):
-         files = message.get("files") or []
-         message = message.get("text") or ""
-         log_debug(f"chat_fn() --> Message (text only): {message}")
-         log_debug(f"chat_fn() --> Files: {files}")
-
-     # Validate that any uploaded files are images
-     if len(files) > 0:
-         invalid_files = []
-         for path in files:
-             try:
-                 mime, _ = mimetypes.guess_type(path)
-                 mime = mime or ""
-                 if not mime.startswith("image/"):
-                     invalid_files.append((os.path.basename(path), mime or "unknown"))
-             except Exception as e:
-                 log_error(f"Failed to inspect file '{path}': {e}")
-                 invalid_files.append((os.path.basename(path), "unknown"))
-
-         if invalid_files:
-             msg = "Only image files are allowed. Invalid uploads: " + \
-                   ", ".join([f"{p} (type: {m})" for p, m in invalid_files])
-             log_warning(msg)
-             gr.Warning(msg)
-             yield history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-             return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-
-     # Enforce maximum number of files/images per request
-     if len(files) > MAX_IMAGE_MESSAGES:
-         gr.Warning(f"Too many images provided; keeping only the first {MAX_IMAGE_MESSAGES} file(s).")
-         files = files[:MAX_IMAGE_MESSAGES]
-
-     try:
-         # Check if the message is empty
-         if not message.strip() and len(files) == 0:
-             gr.Info("Please enter a message before sending")
-             yield history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-             return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-
-         chat_start_count = chat_start_count + 1
-         user_messages_count = sum(1 for item in history if isinstance(item, dict) and item.get("role") == "user"
-                                   and isinstance(item.get("content"), str))
-         log_info(f"chat_start_count: {chat_start_count}, turns: {user_messages_count + 1}, model: {model_name}")
-
-         is_reasoning = model_config.get("REASONING")
-
-         # Remove any assistant messages with metadata from history for multiple turns
-         log_debug(f"Initial History: {history}")
-         check_format(history, "messages")
-         # Build UI history: add text (if any) and per-file image placeholders {"path": ...}
-         # Build API parts separately later to avoid Gradio issues with arrays in content
-         if len(files) == 0:
-             history.append({"role": "user", "content": message})
-         else:
-             if message.strip():
-                 history.append({"role": "user", "content": message})
-             for path in files:
-                 history.append({"role": "user", "content": {"path": path}})
-
-         log_debug(f"History with user message: {history}")
-         check_format(history, "messages")
-
-         # Create the streaming response
-         try:
-             history_no_thoughts = [item for item in history if
-                                    not (isinstance(item, dict) and
-                                         item.get("role") == "assistant" and
-                                         isinstance(item.get("metadata"), dict) and
-                                         item.get("metadata", {}).get("title") is not None)]
-             log_debug(f"Updated History: {history_no_thoughts}")
-             check_format(history_no_thoughts, "messages")
-             log_debug(f"history_no_thoughts with user message: {history_no_thoughts}")
-
-             # Build API-specific messages:
-             # - Convert any UI image placeholders {"path": ...} to image_url parts
-             # - Convert any user string content that is a valid file path to image_url parts
-             # - Coalesce consecutive image paths into a single image-only user message
-             api_messages = []
-             image_parts_buffer = []
-
-             def flush_image_buffer():
-                 if len(image_parts_buffer) > 0:
-                     api_messages.append({"role": "user", "content": list(image_parts_buffer)})
-                     image_parts_buffer.clear()
-
-             def to_image_part(path: str):
-                 try:
-                     mime, _ = mimetypes.guess_type(path)
-                     mime = mime or "application/octet-stream"
-                     with open(path, "rb") as f:
-                         b64 = base64.b64encode(f.read()).decode("utf-8")
-                     data_url = f"data:{mime};base64,{b64}"
-                     return {"type": "image_url", "image_url": {"url": data_url}}
-                 except Exception as e:
-                     log_error(f"Failed to load file '{path}': {e}")
-                     return None
-
-             def normalize_msg(msg):
-                 # Returns (role, content, as_dict) where as_dict is a message dict suitable to pass through when unmodified
-                 if isinstance(msg, dict):
-                     return msg.get("role"), msg.get("content"), msg
-                 # Gradio ChatMessage-like object
-                 role = getattr(msg, "role", None)
-                 content = getattr(msg, "content", None)
-                 if role is not None:
-                     return role, content, {"role": role, "content": content}
-                 return None, None, msg
-
-             for m in copy.deepcopy(history_no_thoughts):
-                 role, content, as_dict = normalize_msg(m)
-                 # Unknown structure: pass through
-                 if role is None:
-                     flush_image_buffer()
-                     api_messages.append(as_dict)
-                     continue
-
-                 # Assistant messages pass through as-is
-                 if role == "assistant":
-                     flush_image_buffer()
-                     api_messages.append(as_dict)
-                     continue
-
-                 # Only user messages have potential image paths to convert
-                 if role == "user":
-                     # Case A: {'path': ...}
-                     if isinstance(content, dict) and isinstance(content.get("path"), str):
-                         p = content["path"]
-                         part = to_image_part(p) if os.path.isfile(p) else None
-                         if part:
-                             image_parts_buffer.append(part)
-                         else:
-                             flush_image_buffer()
-                             api_messages.append({"role": "user", "content": str(content)})
-                         continue
-
-                     # Case B: string or tuple content that may be a file path
-                     if isinstance(content, str):
-                         if os.path.isfile(content):
-                             part = to_image_part(content)
-                             if part:
-                                 image_parts_buffer.append(part)
-                                 continue
-                         # Not a file path: pass through as text
-                         flush_image_buffer()
-                         api_messages.append({"role": "user", "content": content})
-                         continue
-                     if isinstance(content, tuple):
-                         # Common case: a single-element tuple containing a path string
-                         tuple_items = list(content)
-                         tmp_parts = []
-                         text_accum = []
-                         for item in tuple_items:
-                             if isinstance(item, str) and os.path.isfile(item):
-                                 part = to_image_part(item)
-                                 if part:
-                                     tmp_parts.append(part)
-                                 else:
-                                     text_accum.append(item)
-                             else:
-                                 text_accum.append(str(item))
-                         if tmp_parts:
-                             flush_image_buffer()
-                             api_messages.append({"role": "user", "content": tmp_parts})
-                             if not text_accum:
-                                 continue
-                         if text_accum:
-                             flush_image_buffer()
-                             api_messages.append({"role": "user", "content": "\n".join(text_accum)})
-                         continue
-
-                     # Case C: list content
-                     if isinstance(content, list):
-                         # If it's already a list of parts, let it pass through
-                         all_dicts = all(isinstance(c, dict) for c in content)
-                         if all_dicts:
-                             flush_image_buffer()
-                             api_messages.append({"role": "user", "content": content})
-                             continue
-                         # It might be a list of strings (paths/text). Convert string paths to image parts, others to text parts
-                         tmp_parts = []
-                         text_accum = []
-
-                         def flush_text_accum():
-                             if text_accum:
-                                 api_messages.append({"role": "user", "content": "\n".join(text_accum)})
-                                 text_accum.clear()
-                         for item in content:
-                             if isinstance(item, str) and os.path.isfile(item):
-                                 part = to_image_part(item)
-                                 if part:
-                                     tmp_parts.append(part)
-                                 else:
-                                     text_accum.append(item)
-                             else:
-                                 text_accum.append(str(item))
-                         if tmp_parts:
-                             flush_image_buffer()
-                             api_messages.append({"role": "user", "content": tmp_parts})
-                         if text_accum:
-                             flush_text_accum()
-                         continue
-
-                     # Fallback: pass through
-                     flush_image_buffer()
-                     api_messages.append(as_dict)
-                     continue
-
-                 # Other roles
-                 flush_image_buffer()
-                 api_messages.append(as_dict)
-
-             # Flush any trailing images
-             flush_image_buffer()
-
-             log_debug(f"sending api_messages to model {model_name}: {api_messages}")
-
-             # Ensure we don't send too many images (count only messages whose content is a list of parts)
-             image_msg_indices = [
-                 i for i, msg in enumerate(api_messages)
-                 if isinstance(msg, dict) and isinstance(msg.get('content'), list)
-             ]
-             image_count = len(image_msg_indices)
-             if image_count > MAX_IMAGE_MESSAGES:
-                 # Remove oldest image messages until we have MAX_IMAGE_MESSAGES or fewer
-                 to_remove = image_count - MAX_IMAGE_MESSAGES
-                 removed = 0
-                 for idx in image_msg_indices:
-                     if removed >= to_remove:
-                         break
-                     # Pop considering prior removals shift indices
-                     api_messages.pop(idx - removed)
-                     removed += 1
-                 gr.Warning(f"Too many images provided; keeping the latest {MAX_IMAGE_MESSAGES} and dropped {removed} older image message(s).")
-
-             stream = openai_client.chat.completions.create(
-                 model=model_name,
-                 messages=api_messages,
-                 temperature=temperature,
-                 stream=True
-             )
-         except Exception as e:
-             log_error(f"Error:\n\t{e}\n\tInference failed for model {model_name} and endpoint {model_config['base_url']}")
-             error = str(e)
-             yield ([{"role": "assistant",
-                      "content": "😔 The model is unavailable at the moment. Please try again later."}],
-                    INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state)
-             if state["opt_out"] is not True:
-                 log_chat(chat_id=state["chat_id"],
-                          session_id=state["session"],
-                          model_name=model_name,
-                          prompt=message,
-                          history=history,
-                          info={"is_reasoning": model_config.get("REASONING"), "temperature": temperature,
-                                "stopped": True, "error": str(e)},
-                          )
-             else:
-                 log_info(f"User opted out of chat history. Not logging chat. model: {model_name}")
-             return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-
-         if is_reasoning:
-             history.append(gr.ChatMessage(
-                 role="assistant",
-                 content="Thinking...",
-                 metadata={"title": "🧠 Thought"}
-             ))
-             log_debug(f"History added thinking: {history}")
-             check_format(history, "messages")
-         else:
-             history.append(gr.ChatMessage(
-                 role="assistant",
-                 content="",
-             ))
-             log_debug(f"History added empty assistant: {history}")
-             check_format(history, "messages")
-
-         output = ""
-         completion_started = False
-         for chunk in stream:
-             if state["stop_flag"]:
-                 log_debug(f"chat_fn() --> Stopping streaming...")
-                 break # Exit the loop if the stop flag is set
-             # Extract the new content from the delta field
-             content = getattr(chunk.choices[0].delta, "content", "") or ""
-             reasoning_content = getattr(chunk.choices[0].delta, "reasoning_content", "") or ""
-             output += reasoning_content + content
-
-             if is_reasoning:
-                 parts = output.split("[BEGIN FINAL RESPONSE]")
-
-                 if len(parts) > 1:
-                     if parts[1].endswith("[END FINAL RESPONSE]"):
-                         parts[1] = parts[1].replace("[END FINAL RESPONSE]", "")
-                     if parts[1].endswith("[END FINAL RESPONSE]\n<|end|>"):
-                         parts[1] = parts[1].replace("[END FINAL RESPONSE]\n<|end|>", "")
-                     if parts[1].endswith("[END FINAL RESPONSE]\n<|end|>\n"):
-                         parts[1] = parts[1].replace("[END FINAL RESPONSE]\n<|end|>\n", "")
-                     if parts[1].endswith("<|end|>"):
-                         parts[1] = parts[1].replace("<|end|>", "")
-                     if parts[1].endswith("<|end|>\n"):
-                         parts[1] = parts[1].replace("<|end|>\n", "")
-
-                 history[-1 if not completion_started else -2] = gr.ChatMessage(
-                     role="assistant",
-                     content=parts[0],
-                     metadata={"title": "🧠 Thought"}
-                 )
-                 if completion_started:
-                     history[-1] = gr.ChatMessage(
-                         role="assistant",
-                         content=parts[1]
-                     )
-                 elif len(parts) > 1 and not completion_started:
-                     completion_started = True
-                     history.append(gr.ChatMessage(
-                         role="assistant",
-                         content=parts[1]
-                     ))
-             else:
-                 if output.endswith("<|end|>"):
-                     output = output.replace("<|end|>", "")
-                 if output.endswith("<|end|>\n"):
-                     output = output.replace("<|end|>\n", "")
-                 history[-1] = gr.ChatMessage(
-                     role="assistant",
-                     content=output
-                 )
-
-             # log_message(f"Yielding messages: {history}")
-             yield history, INPUT_DISABLED, SEND_BUTTON_DISABLED, STOP_BUTTON_ENABLED, BUTTON_DISABLED, state
-
-         log_debug(f"Final History: {history}")
-         check_format(history, "messages")
-         yield history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-     finally:
-         if error is None:
-             log_debug(f"chat_fn() --> Finished streaming. {chat_start_count} chats started.")
-             if state["opt_out"] is not True:
-                 log_chat(chat_id=state["chat_id"],
-                          session_id=state["session"],
-                          model_name=model_name,
-                          prompt=message,
-                          history=history,
-                          info={"is_reasoning": model_config.get("REASONING"), "temperature": temperature,
-                                "stopped": state["stop_flag"]},
-                          )
-
-             else:
-                 log_info(f"User opted out of chat history. Not logging chat. model: {model_name}")
-         state["is_streaming"] = False
-         state["stop_flag"] = False
-         return history, INPUT_ENABLED, SEND_BUTTON_ENABLED, STOP_BUTTON_DISABLED, BUTTON_ENABLED, state
-
-
- log_info(f"Gradio version: {gr.__version__}")
-
- title = None
- description = None
- theme = apriel
-
- with open('styles.css', 'r') as f:
-     custom_css = f.read()
-
- with gr.Blocks(theme=theme, css=custom_css) as demo:
-     session_state = gr.State(value={
-         "is_streaming": False,
-         "stop_flag": False,
-         "chat_id": None,
-         "session": None,
-         "opt_out": DEFAULT_OPT_OUT_VALUE,
-     }) # Store session state as a dictionary
-
-     gr.HTML(f"""
-         <style>
-             @media (min-width: 1024px) {{
-                 .send-button-container, .clear-button-container {{
-                     max-width: {BUTTON_WIDTH}px;
-                 }}
-             }}
-         </style>
-     """, elem_classes="css-styles")
-     with gr.Row(variant="compact", elem_classes=["responsive-row", "no-padding"], ):
-         with gr.Column():
-             gr.Markdown(
-                 """
-                 <span class="banner-message-text">ℹ️ This app has been updated to use the recommended temperature of 0.6. We had set it to 0.8 earlier and expect 0.6 to be better. Please provide feedback using the model link.</span>
-                 """
-                 , elem_classes="banner-message"
-             )
-     with gr.Row(variant="panel", elem_classes="responsive-row"):
-         with gr.Column(scale=1, min_width=400, elem_classes="model-dropdown-container"):
-             model_dropdown = gr.Dropdown(
-                 choices=[f"Model: {model}" for model in models_config.keys()],
-                 value=f"Model: {DEFAULT_MODEL_NAME}",
-                 label=None,
-                 interactive=True,
-                 container=False,
-                 scale=0,
-                 min_width=400
-             )
-         with gr.Column(scale=4, min_width=0):
-             feedback_message_html = gr.HTML(description, elem_classes="model-message")
-
-     chatbot = gr.Chatbot(
-         type="messages",
-         height="calc(100svh - 320px)",
-         max_height="calc(100svh - 320px)",
-         elem_classes="chatbot",
-     )
-
-     with gr.Row():
-         with gr.Column(scale=10, min_width=400, elem_classes="user-input-container"):
-             with gr.Row():
-                 user_input = gr.MultimodalTextbox(
-                     interactive=True,
-                     container=False,
-                     file_count="multiple",
-                     placeholder="Type your message here and press Enter or upload file...",
-                     show_label=False,
-                     sources=["upload"],
-                     max_plain_text_length=100000,
-                     max_lines=10
-                 )
-
-                 # Original text-only input
-                 # user_input = gr.Textbox(
-                 #     show_label=False,
-                 #     placeholder="Type your message here and press Enter",
-                 #     container=False
-                 # )
-         with gr.Column(scale=1, min_width=BUTTON_WIDTH * 2 + 20):
-             with gr.Row():
-                 with gr.Column(scale=1, min_width=BUTTON_WIDTH, elem_classes="send-button-container"):
-                     send_btn = gr.Button("Send", variant="primary", elem_classes="control-button")
-                     stop_btn = gr.Button("Stop", variant="cancel", elem_classes="control-button", visible=False)
-                 with gr.Column(scale=1, min_width=BUTTON_WIDTH, elem_classes="clear-button-container"):
-                     clear_btn = gr.ClearButton(chatbot, value="New Chat", variant="secondary", elem_classes="control-button")
-     with gr.Row():
-         with gr.Column(min_width=400, elem_classes="opt-out-container"):
-             with gr.Row():
-                 gr.HTML(
-                     "We may use your chats to improve our AI. You may opt out if you don’t want your conversations saved.",
-                     elem_classes="opt-out-message")
-             with gr.Row():
-                 opt_out_checkbox = gr.Checkbox(
-                     label="Don’t save my chat history for improvements or training",
-                     value=DEFAULT_OPT_OUT_VALUE,
-                     elem_classes="opt-out-checkbox",
-                     interactive=True,
-                     container=False
-                 )
-
-     gr.on(
-         triggers=[send_btn.click, user_input.submit],
-         fn=run_chat_inference, # this generator streams results. do not use logged_event_handler wrapper
-         inputs=[chatbot, user_input, session_state],
-         outputs=[chatbot, user_input, send_btn, stop_btn, clear_btn, session_state],
-         concurrency_limit=4,
-         api_name=False
-     ).then(
-         fn=chat_finished, inputs=None, outputs=[model_dropdown, user_input, send_btn, stop_btn, clear_btn], queue=False)
-
-     # In parallel, disable or update the UI controls
-     gr.on(
-         triggers=[send_btn.click, user_input.submit],
-         fn=chat_started,
-         inputs=None,
-         outputs=[model_dropdown, user_input, send_btn, stop_btn, clear_btn],
-         queue=False,
-         show_progress='hidden',
-         api_name=False
-     )
-
-     stop_btn.click(
-         fn=stop_chat,
-         inputs=[session_state],
-         outputs=[session_state],
-         api_name=False
-     )
-
-     opt_out_checkbox.change(fn=toggle_opt_out, inputs=[session_state, opt_out_checkbox], outputs=[session_state])
-
-     # Ensure the model is reset to default on page reload
-     demo.load(
-         fn=logged_event_handler(
-             log_msg="Browser session started",
-             event_handler=app_loaded
-         ),
-         inputs=[session_state],
-         outputs=[session_state, feedback_message_html],
-         queue=True,
-         api_name=False
-     )
-
-     model_dropdown.change(
-         fn=update_model_and_clear_chat,
-         inputs=[model_dropdown],
-         outputs=[feedback_message_html, chatbot],
-         api_name=False
-     )
-
- demo.queue(default_concurrency_limit=2).launch(ssr_mode=False, show_api=False, max_file_size="10mb")
- log_info("Gradio app launched")
+ print("Hello, World!")