Conversations with chat history
Version 1:
Wrap the model with RunnableWithMessageHistory

from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
from langchain_core.chat_history import (
    BaseChatMessageHistory,
    InMemoryChatMessageHistory,
)
from langchain_core.runnables.history import RunnableWithMessageHistory

model = ChatOpenAI(model="gpt-3.5-turbo")

# In-memory store: maps session_id -> that session's chat history
store = {}

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    # Create a history for a new session_id on first use, then keep reusing it
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]

with_message_history = RunnableWithMessageHistory(model, get_session_history)

config = {"configurable": {"session_id": "abc2"}}

result = with_message_history.invoke(
    [HumanMessage(content="Hello World!")],
    config=config,
)
result.content
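
To confirm that the turn was recorded, you can inspect the in-memory store directly. A minimal check, assuming the "abc2" session above was just used (every session's history object exposes a .messages list):

for message in store["abc2"].messages:
    print(type(message).__name__, ":", message.content)
# Expected: the HumanMessage "Hello World!" followed by the model's AIMessage reply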

Version 2:
Wrap it into a chain

from langchain_core.output_parsers import StrOutputParser
parser = StrOutputParser()

chain = with_message_history | parser

config = {"configurable": {"session_id": "abc3"}}

chain.invoke(
    [HumanMessage(content="你好呀")],
    config=config,
)

chain.invoke(
    [HumanMessage(content="我叫Bob")],
    config=config,
)

chain.invoke(
    [HumanMessage(content="我叫什么名字")],
    config=config,
)
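
Because the history lives in the plain store dict, a session can also be reset without restarting the program. A small sketch, continuing the "abc3" session from above:

# Clearing a session's history makes the next invoke start from scratch
store["abc3"].clear()

chain.invoke(
    [HumanMessage(content="我叫什么名字")],
    config=config,
)
# After the clear, the model no longer remembers the name given earlier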

Version 3:
Wrap it into a chat_with_history function

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]

def chat_with_history(input_text, session_id):
    model = ChatOpenAI(model="gpt-3.5-turbo")
    config = {"configurable": {"session_id": session_id}}
    with_message_history = RunnableWithMessageHistory(model, get_session_history)
    result = with_message_history.invoke(
        [HumanMessage(content=input_text)],
        config=config,
    )
    return result.content

chat_with_history("你好鸭,介绍一下你自己吧", "session_1")

Version 4:
Introduce a prompt template

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant, Answer the user's question briefly"),
        MessagesPlaceholder(variable_name="messages"),
    ]
)

# Optional check: render the prompt without calling the model
# prompt.format_messages(messages=[HumanMessage(content="你好鸭,介绍一下你自己吧")])

chain = prompt | model | parser

# Optional check: run the chain once without the history wrapper
# chain.invoke({"messages": [HumanMessage(content="你好鸭,介绍一下你自己吧")]})

with_message_history = RunnableWithMessageHistory(chain, get_session_history)

config = {"configurable": {"session_id": "session_3"}}

response = with_message_history.invoke(
    [HumanMessage(content="你好鸭,介绍一下你自己吧")],
    config=config,
)

response
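
A follow-up call in the same "session_3" session shows that the stored history flows into MessagesPlaceholder together with the system prompt. A minimal continuation (since the chain ends with the parser, the result is already a plain string):

response = with_message_history.invoke(
    [HumanMessage(content="Summarize your previous introduction in one sentence")],
    config=config,
)

response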

Version 5:
Wrap it into a chat_with_prompt function

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant, Answer the user's question briefly"),
        MessagesPlaceholder(variable_name="messages"),
    ]
)

def chat_with_prompt(input_text, session_id, prompt):
    model = ChatOpenAI(model="gpt-3.5-turbo")
    parser = StrOutputParser()
    config = {"configurable": {"session_id": session_id}}
    chain = prompt | model | parser
    with_message_history = RunnableWithMessageHistory(chain, get_session_history)

    result = with_message_history.invoke(
        [HumanMessage(content=input_text)],
        config=config,
    )
    return result

chat_with_prompt("你好鸭,我是Jobs", "session_4", prompt)
chat_with_prompt("给出我的名字的全大写形式", "session_4", prompt)

Version 6:
Try writing a more complete prompt

prompt = ChatPromptTemplate.from_messages(
    [
        ( 
            "system", 
            """
            你是一个友善的ChatBot,现在,你将和我玩一个游戏,游戏包括如下步骤:
            1. 询问我的中文名字的拼音首字母拼写

            2. 根据我的回答,猜测我的中文名字,你将至少给出五个猜测,并分别解释每种猜测的寓意

            3. 然后,我将告诉你正确答案,也就是我的名字,这时,你再分析一下我的名字的寓意

            4. 最后,你将总结我的名字的寓意,并给出你的感受
            """,
        ),
        MessagesPlaceholder(variable_name="messages"),
    ]
)

chat_with_prompt("", "session_6", prompt)
chat_with_prompt("lzy", "session_6", prompt)
chat_with_prompt("我的名字是刘振宇", "session_6", prompt)

Version 7:
Implement a language-teacher ChatBot

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]

prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system", 
            """
            你是一个语言老师,你将教我{language}语言,
            你将始终使用{language}语言和中文来与我说话,
            每当我与你聊天时,请先给出{language}语言的回答,然后再将你的回答翻译成中文,从而让我有机会学习{language}语言,
            最后,你可以适当给出你的{language}语言的语法要点
            """
        ),
        MessagesPlaceholder(variable_name="messages"),
    ]
)

chain = prompt | model | parser

with_message_history = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key="messages",
)

config = {"configurable": {"session_id": "session_7"}}

response = with_message_history.invoke(
    {
        "messages": [HumanMessage(content="你好,我是todd")],
        "language": "English",
    },
    config=config,
)

response
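
Prompt variables other than the history (here language) still have to be supplied on every turn. A follow-up in the same "session_7" session, which should also recall the name "todd" from the history:

response = with_message_history.invoke(
    {
        "messages": [HumanMessage(content="What is my name?")],
        "language": "English",
    },
    config=config,
)

response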

Version 8:
Reimplement chat_with_prompt, this time using the messages input key

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]

prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system", 
            """
            你是一个语言老师,你将教我{language}语言,
            你将始终使用{language}语言和中文来与我说话,
            每当我与你聊天时,请先给出{language}语言的回答,然后再将你的回答翻译成中文,从而让我有机会学习{language}语言,
            最后,你可以适当给出你的{language}语言的语法要点
            """
        ),
        MessagesPlaceholder(variable_name="messages"),
    ]
)

def chat_with_prompt(input_text, session_id, prompt):
    model = ChatOpenAI(model="gpt-3.5-turbo")
    parser = StrOutputParser()
    config = {"configurable": {"session_id": session_id}}
    chain = prompt | model | parser
    with_message_history = RunnableWithMessageHistory(
        chain, 
        get_session_history, 
        input_messages_key="messages",
    )

    result = with_message_history.invoke(
        {
            "messages": [HumanMessage(content=input_text)],
            "language": "English",
        },
        config=config,
    )
    return result

chat_with_prompt("你好鸭", "session_8", prompt)

Version 9:
Introduce streaming output

config = {"configurable": {"session_id": "session_10"}}

for r in with_message_history.stream(
    {
        "messages": [HumanMessage(content="你好鸭, 用python写一个快速排序")],
        "language": "English",
    },
    config=config,
):
    print(r, end="")

Version 10:
Wrap streaming output into a function

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]

prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system", 
            """
            你是一个语言老师,你将教我{language}语言,
            你将始终使用{language}语言和中文来与我说话,
            每当我与你聊天时,请先给出{language}语言的回答,然后再将你的回答翻译成中文,从而让我有机会学习{language}语言,
            最后,你可以适当给出你的{language}语言的语法要点
            """
        ),
        MessagesPlaceholder(variable_name="messages"),
    ]
)

def chat_with_stream(input_text, session_id, prompt):
    model = ChatOpenAI(model="gpt-3.5-turbo")
    parser = StrOutputParser()
    config = {"configurable": {"session_id": session_id}}
    chain = prompt | model | parser
    with_message_history = RunnableWithMessageHistory(
        chain, 
        get_session_history, 
        input_messages_key="messages",
    )

    for r in with_message_history.stream(
        {
            "messages": [HumanMessage(content=input_text)],
            "language": "English",
        },
        config=config,
    ):
        print(r, end="")

chat_with_stream("你好鸭, 用python写一个快速排序", "session_11", prompt)