import os
import bs4
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain.chains import create_retrieval_chain
from langchain_core.prompts import MessagesPlaceholder
from langchain.chains import create_history_aware_retriever
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.messages import AIMessage
# Register the OpenAI API key (placeholder — replace with a real key before running).
os.environ["OPENAI_API_KEY"] = "<OPENAI_API_KEY>"
# Configure the OpenAI chat model shared by every chain below.
chatOpenAI = ChatOpenAI(model="gpt-3.5-turbo-0125")
# System instructions for the answering step; {context} is filled with the
# retrieved documents by create_stuff_documents_chain.
qnaSystemPrompt = "You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, say that you don't know. Use three sentences maximum and keep the answer concise.\n\n{context}"
# Build the question-answering chat prompt template.
qnaChatPromptTemplate = ChatPromptTemplate.from_messages([
    ("system", qnaSystemPrompt),
    MessagesPlaceholder("chatHistoryList"),
    ("human", "{input}"),
])
# Create the question-answering (stuff-documents) chain.
qnaRunnableBinding = create_stuff_documents_chain(chatOpenAI, qnaChatPromptTemplate)
# Load the blog post, keeping only the title/header/body containers.
webBaseLoader = WebBaseLoader(
    web_paths=("https://lilianweng.github.io/posts/2023-06-23-agent/",),
    bs_kwargs={"parse_only": bs4.SoupStrainer(class_=("post-content", "post-title", "post-header"))},
)
documentList = webBaseLoader.load()
# Split the documents into overlapping chunks sized for embedding.
recursiveCharacterTextSplitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splitDocumentList = recursiveCharacterTextSplitter.split_documents(documentList)
# Embed the chunks into a Chroma vector store and expose it as a retriever.
chroma = Chroma.from_documents(documents=splitDocumentList, embedding=OpenAIEmbeddings())
vectorStoreRetriever = chroma.as_retriever()
# Instructions for rewriting a follow-up question into a standalone question.
contextualizeQuestionSystemPrompt = "Given a chat history and the latest user question which might reference context in the chat history, formulate a standalone question which can be understood without the chat history. Do NOT answer the question, just reformulate it if needed and otherwise return it as is."
# Build the question-reformulation chat prompt template.
contextualizeQuestionChatPromptTemplate = ChatPromptTemplate.from_messages([
    ("system", contextualizeQuestionSystemPrompt),
    MessagesPlaceholder("chatHistoryList"),
    ("human", "{input}"),
])
# Create the history-aware retriever chain (reformulate, then retrieve).
historyAwareRetrieverRunnableBinding = create_history_aware_retriever(chatOpenAI, vectorStoreRetriever, contextualizeQuestionChatPromptTemplate)
# Create the full RAG chain: retrieve first, then answer from the retrieved context.
ragRunnableBinding = create_retrieval_chain(historyAwareRetrieverRunnableBinding, qnaRunnableBinding)
# In-memory store mapping each session ID to its chat message history.
chatMessageHistoryDictionary = {}
def getSessionHistory(sessionID : str) -> BaseChatMessageHistory:
    """Return the chat history for sessionID, creating an empty one on first use."""
    try:
        return chatMessageHistoryDictionary[sessionID]
    except KeyError:
        newHistory = ChatMessageHistory()
        chatMessageHistoryDictionary[sessionID] = newHistory
        return newHistory
# Wrap the RAG chain so each session's chat history is looked up and injected
# automatically; the chain's "answer" output is appended back to that history.
conversationalRAGRunnableBinding = RunnableWithMessageHistory(
    ragRunnableBinding,
    getSessionHistory,
    input_messages_key="input",
    history_messages_key="chatHistoryList",
    output_messages_key="answer",
)
# Ask two questions in the same session; the second one ("doing it") only makes
# sense because the first exchange is carried in the session's chat history.
aiMessage1 = conversationalRAGRunnableBinding.invoke(
    {"input" : "What is Task Decomposition?"},
    config={"configurable" : {"session_id" : "session1"}},
)
aiMessage2 = conversationalRAGRunnableBinding.invoke(
    {"input" : "What are common ways of doing it?"},
    config={"configurable" : {"session_id" : "session1"}},
)
# Print the accumulated chat history for the session.
for message in chatMessageHistoryDictionary["session1"].messages:
    prefix = "AI" if isinstance(message, AIMessage) else "User"
    print(f"{prefix} : {message.content}\n")
"""
User : What is Task Decomposition?
AI : Task decomposition is a technique used to break down complex tasks into smaller, more manageable steps. This process helps agents or models tackle difficult tasks by dividing them into simpler subtasks, making it easier to plan and execute the overall task effectively. Task decomposition can be achieved through methods like prompting the model with step-by-step instructions, task-specific guidance, or human input.
User : What are common ways of doing it?
AI : Task decomposition can be achieved through various methods, including prompting the model with step-by-step instructions such as "Steps for XYZ" or "What are the subgoals for achieving XYZ?", providing task-specific guidance like "Write a story outline" for writing a novel, or incorporating human inputs to guide the decomposition process. These approaches help break down complex tasks into smaller, more manageable steps, enabling agents or models to effectively plan and execute the overall task.
"""