import os

from flask import Flask, Response, render_template, request
from openai import OpenAI

app = Flask(__name__)

# Initialize the ModelScope OpenAI-compatible client.
# SECURITY: the API token was hard-coded here. Prefer the MODELSCOPE_API_KEY
# environment variable; the literal value is kept only as a fallback so
# existing deployments keep working. Rotate the exposed token.
client = OpenAI(
    base_url="https://api-inference.modelscope.cn/v1",
    api_key=os.environ.get(
        "MODELSCOPE_API_KEY", "ms-d948e23a-7f3e-4757-8454-9558849b9fc1"
    ),
)
@app.route("/")
def index():
    """Serve the chat front-end page."""
    template_name = "index.html"
    return render_template(template_name)
@app.route("/chat", methods=["POST"])
def chat():
    """Stream a model reply for the POSTed JSON message as plain text.

    Expects a JSON body like {"message": "..."} and returns a streamed
    text/plain response: reasoning tokens first (if any), then a blank
    line, then the final answer tokens.
    """
    # get_json(silent=True) tolerates a missing/invalid JSON body instead
    # of raising (request.json would 400/None in that case).
    payload = request.get_json(silent=True) or {}
    user_input = payload.get("message", "")

    def generate():
        response = client.chat.completions.create(
            model="deepseek-ai/DeepSeek-V3.1",
            messages=[{"role": "user", "content": user_input}],
            stream=True,
        )
        done_reasoning = False
        for chunk in response:
            delta = chunk.choices[0].delta
            # Not every model/SDK version sets reasoning_content; be defensive
            # so a non-reasoning model does not raise AttributeError.
            reasoning_chunk = getattr(delta, "reasoning_content", None)
            answer_chunk = delta.content
            if reasoning_chunk:
                yield reasoning_chunk
            elif answer_chunk:
                if not done_reasoning:
                    # Separate the reasoning stream from the final answer.
                    yield "\n"
                    done_reasoning = True
                yield answer_chunk

    return Response(generate(), mimetype="text/plain")
if __name__ == "__main__":
    # NOTE(review): debug=True enables the Werkzeug debugger/auto-reload;
    # do not use it in production.
    app.run(debug=True)
--- templates/index.html — Flask template served by the app above ---
<!DOCTYPE html>
<!-- Chat front-end for the Flask app above: posts JSON to /chat and
     renders the streamed plain-text reply into an "AI" bubble. -->
<html lang="zh">
<head>
    <meta charset="UTF-8">
    <title>AI 聊天助手</title>
    <style>
        /* Center the chat card on a light page. */
        body {
            font-family: "Microsoft YaHei", sans-serif;
            background-color: #f5f5f5;
            display: flex;
            justify-content: center;
            align-items: center;
            height: 100vh;
            margin: 0;
        }
        .chat-container {
            background: white;
            width: 600px;
            max-width: 95%;
            height: 80vh;
            display: flex;
            flex-direction: column;
            border-radius: 10px;
            box-shadow: 0 0 10px rgba(0,0,0,0.1);
        }
        .chat-box {
            flex: 1;
            overflow-y: auto;
            padding: 15px;
            display: flex;
            flex-direction: column;
            gap: 10px;
        }
        /* Shared bubble shape; pre-wrap keeps the model's line breaks. */
        .chat-bubble {
            max-width: 70%;
            padding: 10px 15px;
            border-radius: 10px;
            word-wrap: break-word;
            white-space: pre-wrap;
        }
        .user {
            align-self: flex-end;
            background-color: #007bff;
            color: white;
            border-bottom-right-radius: 0;
        }
        .ai {
            align-self: flex-start;
            background-color: #eaeaea;
            color: black;
            border-bottom-left-radius: 0;
        }
        .input-area {
            display: flex;
            padding: 10px;
            border-top: 1px solid #ddd;
        }
        .input-area input {
            flex: 1;
            padding: 8px;
            border-radius: 5px;
            border: 1px solid #ddd;
        }
        .input-area button {
            background: #007bff;
            color: white;
            border: none;
            margin-left: 10px;
            padding: 8px 12px;
            border-radius: 5px;
            cursor: pointer;
        }
        .input-area button:hover {
            background: #0056b3;
        }
    </style>
</head>
<body>
    <div class="chat-container">
        <div id="chatBox" class="chat-box"></div>
        <div class="input-area">
            <!-- Enter key submits, same as the button. -->
            <input type="text" id="userInput" placeholder="请输入你的问题..." onkeydown="if(event.key==='Enter') sendMessage()">
            <button onclick="sendMessage()">发送</button>
        </div>
    </div>
    <script>
        // Send the textbox content to /chat and stream the reply into the page.
        async function sendMessage() {
            const inputBox = document.getElementById("userInput");
            const chatBox = document.getElementById("chatBox");
            const userMessage = inputBox.value.trim();
            if (!userMessage) return;
            // Append the user's message bubble.
            const userBubble = document.createElement("div");
            userBubble.className = "chat-bubble user";
            userBubble.textContent = userMessage;
            chatBox.appendChild(userBubble);
            // Create the AI bubble with a "thinking" placeholder.
            const aiBubble = document.createElement("div");
            aiBubble.className = "chat-bubble ai";
            aiBubble.textContent = "正在思考...";
            chatBox.appendChild(aiBubble);
            chatBox.scrollTop = chatBox.scrollHeight;
            inputBox.value = "";
            const response = await fetch("/chat", {
                method: "POST",
                headers: {"Content-Type": "application/json"},
                body: JSON.stringify({ message: userMessage })
            });
            // Read the streamed plain-text body chunk by chunk.
            const reader = response.body.getReader();
            const decoder = new TextDecoder();
            aiBubble.textContent = ""; // clear the "thinking..." placeholder
            while (true) {
                const {done, value} = await reader.read();
                if (done) break;
                // NOTE(review): decode(value) without {stream: true} can garble
                // a multi-byte UTF-8 character that is split across chunks.
                aiBubble.textContent += decoder.decode(value);
                chatBox.scrollTop = chatBox.scrollHeight;
            }
        }
    </script>
</body>
</html>
import gradio as gr
from openai import OpenAI

# Initialize the ModelScope OpenAI-compatible client.
# NOTE(review): hard-coded API token — move it to an environment variable
# and rotate this exposed key.
client = OpenAI(
    base_url="https://api-inference.modelscope.cn/v1",
    api_key="ms-d948e23a-7f3e-4757-8454-9558849b9fc1",
)
def chat_fn(history, user_message):
    """Stream an updated chat history for a tuple-style Gradio Chatbot.

    Args:
        history: [(user, bot), ...] previous turns.
        user_message: the user's new question.

    Yields:
        history plus the in-progress (user_message, bot_reply) pair, so the
        Chatbot component updates token by token.

    BUG FIX: the original returned a generator object from a plain function;
    Gradio only streams from generator *functions*, so this is now a
    generator itself (no inner stream_response wrapper).
    """
    messages = []
    for user, bot in history:
        messages.append({"role": "user", "content": user})
        messages.append({"role": "assistant", "content": bot})
    messages.append({"role": "user", "content": user_message})

    # Show an immediate placeholder while waiting for the first token.
    yield history + [(user_message, "🤔 思考中...")]

    response = client.chat.completions.create(
        model="deepseek-ai/DeepSeek-V3.1",
        messages=messages,
        stream=True
    )
    bot_reply = ""
    done_reasoning = False
    for chunk in response:
        delta = chunk.choices[0].delta
        # reasoning_content is model-specific; fall back to None if absent.
        reasoning_chunk = getattr(delta, "reasoning_content", None)
        answer_chunk = delta.content
        if reasoning_chunk:
            if not done_reasoning:
                bot_reply += "\n\n🔍 推理中...\n"
                done_reasoning = True
            bot_reply += reasoning_chunk
            yield history + [(user_message, bot_reply)]
        elif answer_chunk:
            # Switch to the final-answer section exactly once.
            if done_reasoning:
                bot_reply += "\n\n=== 最终回答 ===\n"
                done_reasoning = False
            bot_reply += answer_chunk
            yield history + [(user_message, bot_reply)]
# Soft-themed two-column layout: branding on the left, chat on the right.
with gr.Blocks(theme=gr.themes.Soft(), css="""
.chatbot {background-color: #f7f7f7;}
.message.user {background-color: #007bff; color: white; border-radius: 10px; align-self: flex-end;}
.message.bot {background-color: #eaeaea; color: black; border-radius: 10px; align-self: flex-start;}
""") as demo:
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 🤖 DeepSeek 模型助手\n仿 DeepSeek 官方界面")
        with gr.Column(scale=3):
            # Tuple-format chatbot (default type), matching chat_fn's
            # [(user, bot), ...] history.
            chatbot = gr.Chatbot(
                height=500,
                label="对话窗口",
                bubble_full_width=False,
                show_copy_button=True
            )
            user_input = gr.Textbox(
                placeholder="输入问题,例如:什么是大语言模型?",
                label="输入",
                lines=1
            )
            submit_btn = gr.Button("发送 🚀")
    # Both the button and pressing Enter submit the message.
    # NOTE(review): the textbox is never cleared after submit.
    submit_btn.click(fn=chat_fn, inputs=[chatbot, user_input], outputs=chatbot)
    user_input.submit(fn=chat_fn, inputs=[chatbot, user_input], outputs=chatbot)

demo.launch()
from openai import OpenAI
import gradio as gr

# Initialize the ModelScope OpenAI-compatible client.
# NOTE(review): hard-coded API token — move to an environment variable
# and rotate this exposed key.
client = OpenAI(
    base_url="https://api-inference.modelscope.cn/v1",
    api_key="ms-d948e23a-7f3e-4757-8454-9558849b9fc1",  # ModelScope Token
)
def gen_text(user_msg, history):
    """Stream a reply for a messages-format Gradio Chatbot.

    Args:
        user_msg: the user's new question.
        history: list of {"role": ..., "content": ...} dicts, as produced by
            gr.Chatbot(type="messages") — the component this is wired to.

    Yields:
        ("", updated_history) so the textbox clears and the chat updates
        token by token.

    BUG FIX: the original built and yielded (user, bot) tuple pairs, which
    do not match the type="messages" Chatbot it feeds; it also returned a
    generator object from a plain function, so Gradio would not stream.
    """
    history = history or []
    history.append({"role": "user", "content": user_msg})
    # Placeholder assistant entry, overwritten in place as tokens arrive.
    history.append({"role": "assistant", "content": "🤔 思考中..."})
    yield "", history

    # Send everything except the placeholder assistant entry to the model.
    messages = [
        {"role": m["role"], "content": m["content"]} for m in history[:-1]
    ]

    response = client.chat.completions.create(
        model="deepseek-ai/DeepSeek-V3.1",
        messages=messages,
        stream=True
    )
    bot_reply = ""
    done_reasoning = False
    for chunk in response:
        delta = chunk.choices[0].delta
        # reasoning_content is model-specific; fall back to None if absent.
        reasoning_chunk = getattr(delta, "reasoning_content", None)
        answer_chunk = delta.content
        if reasoning_chunk:
            if not done_reasoning:
                bot_reply += "🔍 推理中...\n"
                done_reasoning = True
            bot_reply += reasoning_chunk
            history[-1] = {"role": "assistant", "content": bot_reply}
            yield "", history
        elif answer_chunk:
            bot_reply += answer_chunk
            history[-1] = {"role": "assistant", "content": bot_reply}
            yield "", history
# Gradio interface
with gr.Blocks() as web_app:
    gr.Markdown("## 🤖 DeepSeek 模型助手")
    # Messages-format chatbot: history entries are role/content dicts.
    chatbot = gr.Chatbot(type="messages", height=500)
    msg = gr.Textbox(placeholder="请输入问题,回车发送")
    # Enter streams the reply; gen_text yields (textbox_value, history).
    msg.submit(fn=gen_text, inputs=[msg, chatbot], outputs=[msg, chatbot])

web_app.launch()
from openai import OpenAI
import gradio as gr

# ModelScope OpenAI-compatible client.
# NOTE(review): hard-coded API token — move to an environment variable
# and rotate this exposed key.
client = OpenAI(
    base_url='https://api-inference.modelscope.cn/v1',
    api_key='ms-d948e23a-7f3e-4757-8454-9558849b9fc1',  # ModelScope Token
)
def gen_text(msg, chat_h):
    """Stream a reply into a messages-format chat history.

    Yields ("", chat_h): the empty string clears the textbox, chat_h is the
    history with the assistant's entry growing token by token.
    """
    chat_h = chat_h or []
    chat_h.append({'role': 'user', 'content': msg})
    stream = client.chat.completions.create(
        model='deepseek-ai/DeepSeek-V3.1',  # ModelScope Model-Id
        messages=chat_h,
        stream=True
    )
    # Placeholder assistant entry; its content is extended in place below.
    chat_h.append({'role': 'assistant', 'content': ''})
    answered = False
    for part in stream:
        delta = part.choices[0].delta
        thinking, answer = delta.reasoning_content, delta.content
        if thinking is not None and thinking != '':
            # Stream the reasoning trace straight into the chat bubble.
            chat_h[-1]['content'] += thinking
            yield "", chat_h
        elif answer is not None and answer != '':
            if not answered:
                # Visual divider between reasoning and the final answer.
                chat_h[-1]['content'] += "\n\n=== 最终回答 ===\n"
                answered = True
            chat_h[-1]['content'] += answer
            yield "", chat_h
# Gradio UI with custom chat-bubble styling.
with gr.Blocks(css="""
.chatbot {background-color: #f8f9fa;}
.message.user {background-color: #007bff; color: white; border-radius: 10px; align-self: flex-end;}
.message.assistant {background-color: #eaeaea; color: black; border-radius: 10px; align-self: flex-start;}
""") as web_app:
    gr.Markdown("<h2 style='text-align:center'>🤖 DeepSeek 大模型助手</h2>")
    # Messages-format chatbot: history entries are role/content dicts.
    chtgbot = gr.Chatbot(type='messages', height=500, label="对话窗口")
    msg = gr.Textbox(placeholder="请输入问题,回车发送...", show_label=False)
    with gr.Row():
        clear_btn = gr.Button("🧹 清空对话", scale=1)
        submit_btn = gr.Button("🚀 发送", scale=3)
    # Submit via button or Enter; gen_text yields (textbox_value, history).
    submit_btn.click(fn=gen_text, inputs=[msg, chtgbot], outputs=[msg, chtgbot])
    msg.submit(fn=gen_text, inputs=[msg, chtgbot], outputs=[msg, chtgbot])
    # Clear button: reset both the textbox and the chat history.
    def clear_chat():
        return "", []
    clear_btn.click(fn=clear_chat, inputs=None, outputs=[msg, chtgbot])

web_app.launch()
from openai import OpenAI
import gradio as gr

# ModelScope OpenAI-compatible client.
# NOTE(review): hard-coded API token — move to an environment variable
# and rotate this exposed key.
client = OpenAI(
    base_url='https://api-inference.modelscope.cn/v1',
    api_key='ms-d948e23a-7f3e-4757-8454-9558849b9fc1',
)
def gen_text(msg, chat_h):
    """Stream a model reply; yields ("", history) for [textbox, chatbot]."""
    if not chat_h:
        chat_h = []
    chat_h.append({'role': 'user', 'content': msg})
    completion = client.chat.completions.create(
        model='deepseek-ai/DeepSeek-V3.1',
        messages=chat_h,
        stream=True
    )
    # The assistant entry starts empty and is grown as tokens stream in.
    chat_h.append({'role': 'assistant', 'content': ''})
    in_answer = False
    for piece in completion:
        d = piece.choices[0].delta
        if d.reasoning_content:
            chat_h[-1]['content'] += d.reasoning_content
            yield "", chat_h
        elif d.content:
            if not in_answer:
                # Separator shown once, between reasoning and the answer.
                chat_h[-1]['content'] += "\n\n=== 最终回答 ===\n"
                in_answer = True
            chat_h[-1]['content'] += d.content
            yield "", chat_h
# Dark-theme UI CSS, injected via gr.Blocks(css=advanced_css) below.
# The .sidebar / .main-chat class names match elem_classes in the layout.
advanced_css = """
body {
background-color: #1b1b1b;
font-family: 'Segoe UI', 'Helvetica Neue', sans-serif;
color: #eaeaea;
margin: 0;
padding: 0;
}
.gradio-container {
display: flex;
height: 100vh;
}
.sidebar {
background-color: #111;
width: 220px;
padding: 20px;
box-shadow: 2px 0 8px rgba(0,0,0,0.4);
display: flex;
flex-direction: column;
justify-content: space-between;
}
.sidebar h3 {
color: #2ecc71;
text-align: center;
}
.sidebar button {
margin-top: 10px;
background: #444 !important;
border-radius: 6px !important;
transition: all 0.2s ease-in-out;
}
.sidebar button:hover {
background: #666 !important;
}
.main-chat {
flex: 1;
display: flex;
flex-direction: column;
justify-content: space-between;
background-color: #1e1e1e;
padding: 10px 20px;
}
.chatbot {
flex: 1;
background-color: #2a2a2a;
border-radius: 12px;
box-shadow: inset 0 0 8px rgba(0,0,0,0.5);
padding: 10px;
}
.message.user {
background-color: #3498db;
color: white;
border-radius: 16px;
padding: 8px 12px;
align-self: flex-end;
max-width: 75%;
}
.message.assistant {
background-color: #27ae60;
color: white;
border-radius: 16px;
padding: 8px 12px;
align-self: flex-start;
max-width: 75%;
white-space: pre-wrap;
}
.input-container {
display: flex;
gap: 10px;
margin-top: 10px;
}
textarea {
background-color: #333 !important;
color: white !important;
border-radius: 6px !important;
border: 1px solid #555 !important;
padding: 8px;
transition: border 0.2s ease-in-out;
}
textarea:focus {
border: 1px solid #2ecc71 !important;
outline: none;
}
button {
background-color: #2ecc71 !important;
color: white !important;
border-radius: 6px !important;
transition: all 0.2s ease-in-out;
}
button:hover {
background-color: #27ae60 !important;
}
"""
with gr.Blocks(css=advanced_css) as web_app:
    with gr.Row():
        # Left sidebar: branding, status, and the clear button.
        with gr.Column(elem_classes="sidebar", scale=0):
            gr.Markdown("### 🤖 DeepSeek\n高级大模型助手")
            gr.Markdown("**模型:** DeepSeek-V3.1\n\n**状态:** 在线 ✅")
            clear_btn = gr.Button("🧹 清空对话")
            gr.Markdown("<sub>© 2025 模拟 DeepSeek UI</sub>")
        # Main chat column: messages-format chatbot plus input row.
        with gr.Column(elem_classes="main-chat", scale=1):
            chtgbot = gr.Chatbot(type='messages', height=500, label=None)
            with gr.Row(elem_classes="input-container"):
                msg = gr.Textbox(placeholder="请输入问题,回车发送...", show_label=False)
                submit_btn = gr.Button("🚀 发送")
    # Submit via button or Enter; gen_text yields (textbox_value, history).
    submit_btn.click(fn=gen_text, inputs=[msg, chtgbot], outputs=[msg, chtgbot])
    msg.submit(fn=gen_text, inputs=[msg, chtgbot], outputs=[msg, chtgbot])
    # Reset both the textbox and the chat history.
    def clear_chat():
        return "", []
    clear_btn.click(fn=clear_chat, inputs=None, outputs=[msg, chtgbot])

web_app.launch()
from openai import OpenAI
import gradio as gr

# ModelScope OpenAI-compatible client.
# NOTE(review): hard-coded API token — move to an environment variable
# and rotate this exposed key.
client = OpenAI(
    base_url='https://api-inference.modelscope.cn/v1',
    api_key='ms-d948e23a-7f3e-4757-8454-9558849b9fc1',
)
def gen_text(msg, chat_h):
    """Stream the assistant reply into a messages-format history.

    Yields ("", chat_h) so the bound textbox is cleared while the chatbot
    shows the growing reply.
    """
    chat_h = chat_h or []
    chat_h.append({'role': 'user', 'content': msg})
    resp = client.chat.completions.create(
        model='deepseek-ai/DeepSeek-V3.1',
        messages=chat_h,
        stream=True
    )
    # Empty assistant entry, extended in place as chunks arrive.
    chat_h.append({'role': 'assistant', 'content': ''})
    divider_added = False
    for event in resp:
        delta = event.choices[0].delta
        text = delta.reasoning_content
        if text:
            chat_h[-1]['content'] += text
        else:
            text = delta.content
            if not text:
                continue  # keep-alive/empty chunk: nothing to show
            if not divider_added:
                # Horizontal rule between reasoning trace and final answer.
                chat_h[-1]['content'] += "\n\n---\n"
                divider_added = True
            chat_h[-1]['content'] += text
        yield "", chat_h
# Final dark-gradient theme CSS, injected via gr.Blocks(css=final_css) below.
# The .sidebar / .main-chat / .chatbot class names match elem_classes in the layout.
final_css = """
body {
background: radial-gradient(circle at top, #1b1b1b, #0f0f0f);
font-family: 'Inter', 'Helvetica Neue', sans-serif;
color: #f0f0f0;
display: flex;
justify-content: center;
align-items: center;
height: 100vh;
margin: 0;
}
.gradio-container {
width: 100%;
max-width: 1000px;
display: flex;
flex-direction: row;
gap: 20px;
}
.sidebar {
background: #151515;
border: 1px solid #333;
border-radius: 12px;
padding: 20px;
min-width: 200px;
box-shadow: 0 0 12px rgba(0,0,0,0.5);
display: flex;
flex-direction: column;
justify-content: space-between;
}
.sidebar h3 {
color: #00d084;
text-align: center;
}
.main-chat {
flex: 1;
background: #1e1e1e;
border-radius: 16px;
box-shadow: 0 4px 20px rgba(0,0,0,0.6);
display: flex;
flex-direction: column;
overflow: hidden;
}
.chatbot {
flex: 1;
background: #242424;
padding: 20px;
overflow-y: auto;
}
.message.user {
background: linear-gradient(135deg, #3a8dff, #0066ff);
color: white;
border-radius: 16px;
padding: 10px 14px;
align-self: flex-end;
max-width: 75%;
box-shadow: 0 2px 6px rgba(0,0,0,0.3);
}
.message.assistant {
background: #2c2c2c;
border-left: 3px solid #00d084;
color: #e8e8e8;
border-radius: 12px;
padding: 10px 14px;
align-self: flex-start;
max-width: 75%;
white-space: pre-wrap;
box-shadow: 0 2px 6px rgba(0,0,0,0.25);
}
.input-area {
display: flex;
gap: 10px;
padding: 12px 20px;
background: #1e1e1e;
border-top: 1px solid #333;
}
textarea {
background: #2d2d2d !important;
color: white !important;
border-radius: 12px !important;
border: 1px solid #444 !important;
padding: 10px;
flex: 1;
transition: border 0.2s ease-in-out;
}
textarea:focus {
border: 1px solid #00d084 !important;
outline: none;
}
button {
background: #00d084 !important;
color: black !important;
border-radius: 10px !important;
transition: all 0.25s ease-in-out;
}
button:hover {
background: #00b56b !important;
transform: scale(1.03);
}
"""
with gr.Blocks(css=final_css) as web_app:
    with gr.Row():
        # Left sidebar: branding and the clear button.
        with gr.Column(elem_classes="sidebar", scale=0):
            gr.Markdown("### 🤖 DeepSeek\n**V3.1 大模型**")
            clear_btn = gr.Button("🧹 清空对话")
            gr.Markdown("<sub style='color:#777'>© 2025 模拟 DeepSeek 界面</sub>")
        # Main chat column: messages-format chatbot plus input row.
        with gr.Column(elem_classes="main-chat", scale=1):
            chtgbot = gr.Chatbot(type='messages', elem_classes="chatbot", height=500, label=None)
            with gr.Row(elem_classes="input-area"):
                msg = gr.Textbox(placeholder="输入问题,按回车或点击发送...", show_label=False)
                submit_btn = gr.Button("发送 🚀")
    # Submit via button or Enter; gen_text yields (textbox_value, history).
    submit_btn.click(fn=gen_text, inputs=[msg, chtgbot], outputs=[msg, chtgbot])
    msg.submit(fn=gen_text, inputs=[msg, chtgbot], outputs=[msg, chtgbot])
    # Reset both the textbox and the chat history.
    def clear_chat():
        return "", []
    clear_btn.click(fn=clear_chat, inputs=None, outputs=[msg, chtgbot])

web_app.launch()
from openai import OpenAI

# ModelScope OpenAI-compatible client.
# NOTE(review): hard-coded API token — move to an environment variable
# and rotate this exposed key.
client = OpenAI(
    base_url='https://api-inference.modelscope.cn/v1',
    api_key='ms-d948e23a-7f3e-4757-8454-9558849b9fc1',  # ModelScope Token
)
def gen_text(msg, chat_h):
    """Stream a reply: reasoning goes to stdout, the answer to the chat.

    Args:
        msg: the user's new message.
        chat_h: messages-format history list (mutated in place).

    Yields:
        ("", chat_h) pairs for Gradio [textbox, chatbot] outputs.
    """
    chat_h = chat_h or []  # tolerate a fresh/None history
    chat_h.append({
        'role': 'user',
        'content': msg,
    })
    response = client.chat.completions.create(
        model='deepseek-ai/DeepSeek-V3.1',  # ModelScope Model-Id, required
        messages=chat_h,
        stream=True
    )
    chat_h.append({
        'role': 'assistant',
        'content': ''
    })
    done_reasoning = False
    for chunk in response:
        # BUG FIX: these deltas can be None (not just ''); the previous
        # `!= ''` checks were True for None and printed the string "None".
        reasoning_chunk = chunk.choices[0].delta.reasoning_content
        answer_chunk = chunk.choices[0].delta.content
        if reasoning_chunk:
            # The reasoning trace is shown on the server console only.
            print(reasoning_chunk, end='', flush=True)
        elif answer_chunk:
            if not done_reasoning:
                print('\n\n === Final Answer ===\n')
                done_reasoning = True
            print(answer_chunk, end='', flush=True)
            chat_h[-1]['content'] += answer_chunk
            yield "", chat_h
import gradio as gr

# Minimal Gradio UI: Enter in the textbox streams gen_text's reply.
with gr.Blocks() as web_app:
    chtgbot = gr.Chatbot(type='messages')
    msg = gr.Textbox()
    msg.submit(fn=gen_text, inputs=[msg, chtgbot], outputs=[msg, chtgbot])

web_app.launch()
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# Sample a 100x100 grid over [-5, 5] x [-5, 5].
axis = np.linspace(-5, 5, 100)
X1, X2 = np.meshgrid(axis, axis)

# Inner linear map g(x) = x1 + x2 + 1, wrapped in a sine.
g = X1 + X2 + 1
Y = np.sin(g)

# Render the surface in 3D.
figure = plt.figure(figsize=(8, 6))
axes3d = figure.add_subplot(111, projection='3d')
axes3d.plot_surface(X1, X2, Y, cmap='viridis')
axes3d.set_xlabel('x1')
axes3d.set_ylabel('x2')
axes3d.set_zlabel('y = sin(g(x))')
axes3d.set_title('y = sin(x1 + x2 + 1)')
plt.show()
import numpy as np
import matplotlib.pyplot as plt

# Plot the straight line y = 2x + 5 over x in [-10, 10].
x = np.linspace(-10, 10, 100)
y = 2 * x + 5

fig, ax = plt.subplots()
ax.plot(x, y, label='y=2x+5')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title('y = 2x + 5')
ax.grid(True)
ax.legend()
plt.show()