
使用 Sanic 與 FastAPI 實現 LangChain 流式輸出(Streaming)的 REST API 範例
# @place: Pudong, Shanghai
# @file: sanic_langchain_stream.py
# @time: 2023/9/19 18:18
# sanic==23.6.0
import asyncio
from sanic import Sanic
from sanic.response import text, json, ResponseStream
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
# Sanic application instance; the name is only used for identification/logging.
app = Sanic("benchmark")
@app.route("/")
async def index(request):
    """Health-check style root endpoint; returns the plain text 'hello'."""
    return text("hello")
@app.route("/test", methods=["POST"])
async def answer(request):
    """Echo endpoint: return the posted JSON 'content' field as {'text': ...}.

    Responds with HTTP 400 when the body is not a JSON object or lacks
    'content', instead of crashing with an unhandled KeyError (HTTP 500).
    """
    body = request.json
    if not isinstance(body, dict) or "content" not in body:
        return json({"error": "missing 'content' field"}, status=400)
    return json({"text": body["content"]})
@app.route("/csv")
async def test(request):
    """Demonstrate Sanic's ResponseStream by emitting a tiny CSV in chunks."""
    async def write_csv(response):
        # Two separate writes so the client observes incremental chunks.
        await response.write("foo,")
        await response.write("bar")
    return ResponseStream(write_csv, content_type="text/csv")
@app.route("/answer/async", methods=["POST"])
async def answer_async(request):
    """Stream an LLM answer over Server-Sent Events.

    Expects a JSON body with a 'content' field (the user prompt) and
    returns a text/event-stream of chunks prefixed with 'data: '.
    """
    content = request.json["content"]

    async def predict(response):
        import os  # local import: keeps the module-level import block untouched

        handler = AsyncIteratorCallbackHandler()
        model_message = [HumanMessage(content=content)]
        chat = ChatOpenAI(
            streaming=True,
            callbacks=[handler],
            temperature=0,
            # Read the key from the environment instead of hard-coding an
            # empty string, which can never authenticate.
            openai_api_key=os.environ.get("OPENAI_API_KEY", ""),
        )
        # Keep a reference: a bare create_task() result can be garbage
        # collected mid-flight and its exceptions silently dropped.
        task = asyncio.create_task(chat.apredict_messages(model_message))
        async for token in handler.aiter():
            await response.write(f"data: {token}\n\n")
        # Surface any exception raised inside the prediction task.
        await task

    return ResponseStream(predict, content_type="text/event-stream")
if __name__ == "__main__":
    # Bind on all interfaces; access_log=True is noisy but handy for the demo.
    app.run(host="0.0.0.0", port=3000, debug=False, access_log=True)
# -*- coding: utf-8 -*-
# @place: Pudong, Shanghai
# @file: fastapi_langchain_stream.py
# @time: 2023/9/20 17:36
# fastapi==0.101.1
import uvicorn
import asyncio
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.responses import StreamingResponse, JSONResponse
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
# FastAPI application; 'description' appears in the generated OpenAPI docs.
app = FastAPI(description="langchain_streaming")
class Item(BaseModel):
    """Request body for POST /test: a single free-form text field."""
    text: str
class Question(BaseModel):
    """Request body for POST /answer/async: the user's question text."""
    text: str
async def fake_video_streamer():
    """Yield ten identical fake video chunks (StreamingResponse demo)."""
    chunk = b"some fake video bytes\n"
    for _ in range(10):
        yield chunk
@app.get("/")
async def main():
    """Root endpoint: stream the fake video generator to the client."""
    body = fake_video_streamer()
    return StreamingResponse(body)
@app.post("/test")
async def test(item: Item):
    """Echo endpoint: wrap the posted text in a JSON object."""
    payload = {"content": item.text}
    return JSONResponse(payload)
@app.post("/answer/async")
async def answer_async(q: Question):
    """Stream an LLM answer as Server-Sent Events.

    Yields chunks of the form 'data: <token>' separated by blank lines,
    so the SSE media type is declared explicitly on the response.
    """
    content = q.text

    async def predict():
        import os  # local import: keeps the module-level import block untouched

        handler = AsyncIteratorCallbackHandler()
        model_message = [HumanMessage(content=content)]
        chat = ChatOpenAI(
            streaming=True,
            callbacks=[handler],
            temperature=0,
            # Never hard-code API keys ('sk-xxx' was a placeholder that would
            # always fail auth); read the real key from the environment.
            openai_api_key=os.environ.get("OPENAI_API_KEY", ""),
        )
        # Keep a reference: a bare create_task() result can be garbage
        # collected mid-stream and its exceptions silently dropped.
        task = asyncio.create_task(chat.apredict_messages(model_message))
        async for token in handler.aiter():
            yield f"data: {token}\n\n"
        # Surface any exception raised inside the prediction task.
        await task

    # The original omitted the media type; SSE clients need text/event-stream.
    return StreamingResponse(predict(), media_type="text/event-stream")
if __name__ == "__main__":
    # Run the FastAPI app with uvicorn on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000, log_level="info")
可視化功能采用HTML,頁面較為簡陋,因筆者的HTML知識有限(甚至可憐)。
HTML文件(form.html):
<!-- Minimal Jinja2 template: posts the question to /qa/ and renders the
     streamed answer below the form ({{ answer }} is filled by the server). -->
<body>
<form action="/qa/" method="post">
<p>question: <br><br><textarea rows="3" cols="50" name="question"></textarea></p>
<input type="submit">
</form>
<p>{{ answer }}</p>
</body>
Python代碼:
# -*- coding: utf-8 -*-
# @place: Pudong, Shanghai
# @file: qa_with_form.py
# @time: 2023/9/20 21:36
from starlette.requests import Request
from fastapi import FastAPI, Form
from starlette.templating import Jinja2Templates
import uvicorn
import asyncio
from fastapi.responses import StreamingResponse
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
app = FastAPI()
# Jinja2 templates are loaded from the local 'template' directory,
# which must contain form.html.
template = Jinja2Templates(directory='template')
@app.get('/')  # handle GET requests
async def get_user(request: Request):
    """Render the question form page."""
    context = {'request': request}
    return template.TemplateResponse('form.html', context)
@app.post('/qa/')  # handle POST requests
async def get_user(request: Request,
                   question: str = Form(...)
                   ):
    """Stream the LLM answer for the submitted form question as HTML.

    NOTE(review): this function shares the name 'get_user' with the GET
    handler above; FastAPI registers routes at decoration time so both
    still work, but distinct names would be clearer — confirm and rename.
    """
    async def predict():
        import os  # local import: keeps the module-level import block untouched

        handler = AsyncIteratorCallbackHandler()
        model_message = [HumanMessage(content=question)]
        chat = ChatOpenAI(
            streaming=True,
            callbacks=[handler],
            temperature=0,
            # 'sk-xxx' was a non-working placeholder; never hard-code keys —
            # read the real one from the environment instead.
            openai_api_key=os.environ.get("OPENAI_API_KEY", ""),
        )
        # Keep a reference: a bare create_task() result can be garbage
        # collected mid-stream and its exceptions silently dropped.
        task = asyncio.create_task(chat.apredict_messages(model_message))
        async for token in handler.aiter():
            # Convert newlines to <br> because the stream is rendered as HTML.
            yield token.replace("\n", "<br>")
        # Surface any exception raised inside the prediction task.
        await task
        yield '<br><a href="/" color="red">返回</a>'

    return StreamingResponse(predict(), media_type='text/html')
if __name__ == '__main__':
    # Run the form-based QA app with uvicorn on all interfaces, port 8080.
    uvicorn.run(app, host='0.0.0.0', port=8080)
演示效果如下面的視頻:
文章轉自微信公眾號@NLP奇幻之旅