import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

model_name = "RUC-DataLab/DeepAnalyze-8B"

# Try the fast tokenizer first; on failure, fall back to the slow tokenizer (use_fast=False).
try:
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, use_fast=True)
except Exception as e:
    print("Fast tokenizer failed:", e)
    print("Falling back to slow tokenizer (use_fast=False).")
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, use_fast=False)
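
# Safeguard (an assumption about this checkpoint, not verified against it): some
# causal-LM tokenizers ship without a pad token, which makes generate() emit
# warnings; the usual transformers workaround is to reuse the EOS token for padding.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token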

# Load the model in bfloat16 and let device_map="auto" place it on available devices.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)
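
# Optional alternative (a sketch, assuming bitsandbytes is installed and a CUDA GPU
# is present): load the weights 4-bit quantized to reduce memory use, at some
# quality cost.
#
#   from transformers import BitsAndBytesConfig
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name,
#       quantization_config=BitsAndBytesConfig(
#           load_in_4bit=True,
#           bnb_4bit_compute_dtype=torch.bfloat16,
#       ),
#       device_map="auto",
#       trust_remote_code=True,
#   )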

def chat_fn(message, history):
    # With type="messages" (set below), history is a list of {"role", "content"}
    # dicts, so it can be passed straight to the chat template.
    inputs = tokenizer.apply_chat_template(
        history + [{"role": "user", "content": message}],
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)

    output = model.generate(inputs, max_new_tokens=512)
    # Decode only the newly generated tokens; output[0] also contains the prompt.
    response = tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True)

    # gr.ChatInterface tracks conversation history itself, so only the assistant
    # reply is returned.
    return response
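
# Note: generate() above uses greedy decoding by default. For more varied replies,
# standard transformers sampling kwargs can be passed instead, e.g.:
#
#   output = model.generate(inputs, max_new_tokens=512,
#                           do_sample=True, temperature=0.7, top_p=0.9)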

# type="messages" makes Gradio keep history as role/content dicts, matching the
# format chat_fn expects.
gr.ChatInterface(chat_fn, type="messages").launch()
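
# To expose a temporary public URL (e.g. from a headless server), Gradio's
# built-in tunnel can be enabled with .launch(share=True).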