import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "RUC-DataLab/DeepAnalyze-8B"

# Try the fast tokenizer first; if it fails, fall back to the slow
# tokenizer (use_fast=False).
try:
    tokenizer = AutoTokenizer.from_pretrained(
        model_name, trust_remote_code=True, use_fast=True
    )
except Exception as e:
    print("Fast tokenizer failed:", e)
    print("Falling back to slow tokenizer (use_fast=False).")
    tokenizer = AutoTokenizer.from_pretrained(
        model_name, trust_remote_code=True, use_fast=False
    )

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)


def chat_fn(message, history):
    # With type="messages" below, `history` arrives as a list of
    # {"role": ..., "content": ...} dicts, which is exactly the format
    # apply_chat_template expects.
    inputs = tokenizer.apply_chat_template(
        history + [{"role": "user", "content": message}],
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)
    output = model.generate(inputs, max_new_tokens=512)
    # Decode only the newly generated tokens, not the echoed prompt.
    response = tokenizer.decode(output[0][inputs.shape[1]:], skip_special_tokens=True)
    # ChatInterface tracks the conversation itself, so return only the reply.
    return response


demo = gr.ChatInterface(chat_fn, type="messages")

if __name__ == "__main__":
    demo.launch()
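
# Optional smoke test (an illustrative addition, not part of the original
# snippet): exercises chat_fn once without starting the web UI. Because of
# the __main__ guard above, importing this file only loads the model, so
# from another script or a REPL ("app" is a hypothetical module name):
#
#     from app import chat_fn
#     print(chat_fn("Hello! What kinds of analysis can you run?", history=[]))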