帮我把这个改成流式响应,供vue调用
#!/usr/bin/env python
from fastapi import FastAPI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langserve import add_routes

# NOTE: the original paste used typographic quotes (“ ” ‘ ’) which are Python
# syntax errors, and the __main__ guard was mangled; both are fixed here.

# System prompt with a {language} placeholder filled per request.
system_template = "Translate the following into {language}:"
prompt_template = ChatPromptTemplate.from_messages([
    ('system', system_template),
    ('user', '{text}')
])

model = ChatOpenAI()
parser = StrOutputParser()
# LCEL pipeline: prompt -> chat model -> plain-string output.
chain = prompt_template | model | parser

app = FastAPI(
    title="LangChain Server",
    version="1.0",
    description="A simple API server using LangChain's Runnable interfaces",
)

# Expose the chain at /chain (LangServe adds /chain/invoke, /chain/stream, ...).
add_routes(
    app,
    chain,
    path="/chain",
)

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="localhost", port=8000)
这段代码是python的,启动后是一个服务,帮我改成流式响应,并且能够被vue组件调用
好的,我已经根据您的要求将代码修改为支持流式响应,并确保能够被Vue组件调用。下面是修改后的代码:
#!/usr/bin/env python
import asyncio
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
# 1. App definition
# FastAPI application; metadata below is shown in the auto-generated /docs UI.
app = FastAPI(
title="LangChain Server",
version="1.0",
description="A simple API server using LangChain's Runnable interfaces with streaming response",
)
@app.post("/stream")
async def stream_endpoint(request: Request):
    """Stream a translation back to the caller (e.g. a Vue component) token by token.

    Expects a JSON body of the form ``{"text": "...", "language": "..."}``.
    Returns a ``text/plain`` StreamingResponse whose chunks are model tokens.
    """
    # Pull the two expected fields out of the JSON request body.
    body = await request.json()
    text = body.get('text')
    language = body.get('language')

    # Prompt template whose placeholders are filled from the request.
    system_template = "Translate the following into {language}:"
    prompt_template = ChatPromptTemplate.from_messages([
        ('system', system_template),
        ('user', '{text}')
    ])

    # streaming=True makes the OpenAI client deliver tokens as they arrive.
    model = ChatOpenAI(streaming=True)
    parser = StrOutputParser()
    chain = prompt_template | model | parser

    inputs = {'text': text, 'language': language}

    # BUGFIX: the original code called chain.acall(inputs), but an LCEL
    # RunnableSequence has no acall() (that was the legacy Chain API), so the
    # endpoint raised AttributeError on every request. Runnables expose
    # .astream(), which yields output chunks directly — no
    # AsyncIteratorCallbackHandler or background asyncio task needed, and an
    # error in the chain propagates instead of hanging the iterator.
    async def event_generator():
        async for token in chain.astream(inputs):
            yield token

    # Hand the async generator to FastAPI; it forwards each chunk to the client.
    return StreamingResponse(event_generator(), media_type="text/plain")
# Run the dev server directly with `python <file>.py`; binds to localhost:8000.
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="localhost", port=8000)
修改说明:
- asyncio:用于处理异步任务。
- StreamingResponse:FastAPI提供的流式响应工具。
- AsyncIteratorCallbackHandler:LangChain提供的异步回调处理器,用于处理流式输出。
- /stream 接口:@app.post("/stream") 装饰器定义了新的 POST 接口 /stream,请求体中包含 text 和 language 两个参数。
- 创建 ChatOpenAI 模型时,设置 streaming=True,并传入 callbacks=[callback_handler]。
- event_generator 异步生成器函数,从 callback_handler 中异步获取生成的内容,并逐步 yield 返回。
- StreamingResponse 将生成器作为响应返回,并设置 media_type 为 text/plain。

在Vue组件中调用:
在Vue组件中,您可以使用 fetch API 或 axios 库来调用该接口,并处理返回的流式响应。下面是一个使用 fetch 的示例:
// Vue组件中的方法
methods: {
async callStreamAPI() {
const response = await fetch('http://localhost:8000/stream', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
text: 'Hello, how are you?',
language: 'French'
})
});
const reader = response.body.getReader();
const decoder = new TextDecoder('utf-8');
let result = '';
let done = false;
while (!done) {
const { value, done: doneReading } = await reader.read();
done = doneReading;
const chunk = decoder.decode(value);
result += chunk;
// 您可以在这里更新组件的状态,例如追加显示收到的内容
console.log(chunk);
}
console.log('Final result:', result);
}
}
注意事项:
如果Vue前端与后端服务不在同一源(域名/端口),浏览器会拦截跨域请求,需要在FastAPI中启用CORS(跨域资源共享)。添加CORS中间件示例:
from fastapi.middleware.cors import CORSMiddleware

# (App creation repeated here for context — same as in the main script above.)
app = FastAPI(
title="LangChain Server",
version="1.0",
description="A simple API server using LangChain's Runnable interfaces with streaming response",
)
# Allow cross-origin requests from any origin (restrict origins in production).
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],  # or list your frontend application's domain(s)
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
希望以上修改能满足您的需求。
powered by kaifamiao