YureiYuri committed on
Commit
cd4a344
Β·
verified Β·
1 Parent(s): 8e7df28

Update: fix login button issue

Browse files
Files changed (1) hide show
  1. app.py +49 -28
app.py CHANGED
@@ -14,7 +14,7 @@ from huggingface_hub import InferenceClient
14
  # ============================================
15
 
16
  CLASSIFIER_MODEL = "YureiYuri/empathy"
17
- LLM_MODEL = "openai/gpt-oss-20b"
18
 
19
  # ============================================
20
  # LOAD CLASSIFIER
@@ -146,25 +146,42 @@ def respond(
146
  yield crisis_msg
147
  return
148
 
149
- # Generate streaming response
150
- client = InferenceClient(token=hf_token.token, model=LLM_MODEL)
151
-
152
- system_message = get_system_prompt()
153
- messages = [{"role": "system", "content": system_message}]
154
- messages.extend(history)
155
- messages.append({"role": "user", "content": message})
156
 
157
- response = ""
158
- for msg in client.chat_completion(
159
- messages,
160
- max_tokens=max_tokens,
161
- stream=True,
162
- temperature=temperature,
163
- top_p=top_p,
164
- ):
165
- if msg.choices and msg.choices[0].delta.content:
166
- response += msg.choices[0].delta.content
167
- yield response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
 
169
 
170
  # ============================================
@@ -190,7 +207,11 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
190
  )
191
 
192
  with gr.Column(scale=1):
193
- gr.LoginButton()
 
 
 
 
194
 
195
  gr.Markdown("### πŸ“Š Emotional Tracking")
196
  gr.Markdown(
@@ -202,14 +223,14 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
202
  )
203
 
204
  metrics_display = gr.Markdown(f"""
205
- **Current Mode:** 🟒 SUPPORTIVE
206
-
207
- **Severity:** 0.0/100
208
- - Dejection: 0.0
209
- - Mood: 0.0
210
- - Calmness: 0.0
211
-
212
- *Metrics update as you chat*
213
  """)
214
 
215
  gr.Markdown(
 
14
  # ============================================
15
 
16
  CLASSIFIER_MODEL = "YureiYuri/empathy"
17
+ LLM_MODEL = "meta-llama/Llama-3.2-3B-Instruct" # Free model, better than TinyLlama
18
 
19
  # ============================================
20
  # LOAD CLASSIFIER
 
146
  yield crisis_msg
147
  return
148
 
149
+ # Check if user is authenticated
150
+ if not hf_token or not hf_token.token:
151
+ yield "⚠️ Please sign in with your Hugging Face account (click the button in the sidebar) to start chatting."
152
+ return
 
 
 
153
 
154
+ # Generate streaming response with error handling
155
+ try:
156
+ client = InferenceClient(token=hf_token.token, model=LLM_MODEL)
157
+
158
+ system_message = get_system_prompt()
159
+ messages = [{"role": "system", "content": system_message}]
160
+ messages.extend(history)
161
+ messages.append({"role": "user", "content": message})
162
+
163
+ response = ""
164
+ for msg in client.chat_completion(
165
+ messages,
166
+ max_tokens=max_tokens,
167
+ stream=True,
168
+ temperature=temperature,
169
+ top_p=top_p,
170
+ ):
171
+ if msg.choices and msg.choices[0].delta.content:
172
+ response += msg.choices[0].delta.content
173
+ yield response
174
+
175
+ except Exception as e:
176
+ error_msg = str(e)
177
+ print(f"❌ LLM Error: {error_msg}")
178
+
179
+ if "401" in error_msg or "Unauthorized" in error_msg:
180
+ yield "⚠️ Authentication error. Please sign in with your Hugging Face account using the button in the sidebar."
181
+ elif "quota" in error_msg.lower() or "rate" in error_msg.lower():
182
+ yield "⚠️ API rate limit reached. Please try again in a moment."
183
+ else:
184
+ yield "I'm here to listen and support you. Could you tell me more about what you're experiencing?"
185
 
186
 
187
  # ============================================
 
207
  )
208
 
209
  with gr.Column(scale=1):
210
+ gr.Markdown("### πŸ” Authentication")
211
+ gr.LoginButton(size="lg")
212
+ gr.Markdown("*Sign in with Hugging Face to chat*")
213
+
214
+ gr.Markdown("---")
215
 
216
  gr.Markdown("### πŸ“Š Emotional Tracking")
217
  gr.Markdown(
 
223
  )
224
 
225
  metrics_display = gr.Markdown(f"""
226
+ **Current Mode:** 🟒 SUPPORTIVE
227
+
228
+ **Severity:** 0.0/100
229
+ - Dejection: 0.0
230
+ - Mood: 0.0
231
+ - Calmness: 0.0
232
+
233
+ *Metrics update as you chat*
234
  """)
235
 
236
  gr.Markdown(