■ Demonstrates how to use the trim_messages function together with a RunnableWithMessageHistory object.
※ The OPENAI_API_KEY environment variable value is defined in the .env file.
▶ main.py
from dotenv import load_dotenv
from langchain_core.messages import SystemMessage
from langchain_core.messages import HumanMessage
from langchain_core.messages import AIMessage
from langchain_openai import ChatOpenAI
from langchain_core.messages import trim_messages
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.runnables.history import RunnableWithMessageHistory

load_dotenv()

# Existing conversation history that will be trimmed before each model call.
messageList = [
    SystemMessage("you're a good assistant, you always respond with a joke."),
    HumanMessage("i wonder why it's called langchain"),
    AIMessage('Well, I guess they thought "WordRope" and "SentenceString" just didn\'t have the same ring to it!'),
    HumanMessage("and who is harrison chasing anyways"),
    AIMessage("Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!")
]

chatOpenAI = ChatOpenAI(model = "gpt-4o")

# Called without messages, trim_messages returns a Runnable that keeps the last
# messages within 45 tokens while always preserving the system message.
runnableLambda = trim_messages(
    max_tokens = 45,
    strategy = "last",
    token_counter = chatOpenAI,
    include_system = True
)

runnableSequence = runnableLambda | chatOpenAI

inMemoryChatMessageHistory = InMemoryChatMessageHistory(messages = messageList)

# Returns the shared history for session "1" and a fresh one for any other session.
def getSessionHistory(session_id):
    if session_id != "1":
        return InMemoryChatMessageHistory()
    return inMemoryChatMessageHistory

runnableWithMessageHistory = RunnableWithMessageHistory(runnableSequence, getSessionHistory)

responseAIMessage = runnableWithMessageHistory.invoke(
    [HumanMessage("what do you call a speechless parrot")],
    config = {"configurable" : {"session_id" : "1"}}
)

print(responseAIMessage)
"""
content = 'A bird of few words!'
additional_kwargs = {'refusal' : None}
response_metadata = {
    'token_usage' : {
        'completion_tokens' : 6,
        'prompt_tokens' : 32,
        'total_tokens' : 38,
        'completion_tokens_details' : {
            'accepted_prediction_tokens' : 0,
            'audio_tokens' : 0,
            'reasoning_tokens' : 0,
            'rejected_prediction_tokens' : 0
        },
        'prompt_tokens_details' : {'audio_tokens' : 0, 'cached_tokens' : 0}
    },
    'model_name' : 'gpt-4o-2024-08-06',
    'system_fingerprint' : 'fp_f785eb5f47',
    'finish_reason' : 'stop',
    'logprobs' : None
}
id = 'run-d5feab66-cda1-462c-ad1a-a6a8b4de9c8f-0'
usage_metadata = {
    'input_tokens' : 32,
    'output_tokens' : 6,
    'total_tokens' : 38,
    'input_token_details' : {
        'audio' : 0,
        'cache_read' : 0
    },
    'output_token_details' : {'audio' : 0, 'reasoning' : 0}
}
"""
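For comparison, the same chain can be invoked with a different session_id. Because getSessionHistory returns a fresh InMemoryChatMessageHistory for any session other than "1", the earlier joke history is not sent to the model. The following is a minimal sketch of such a follow-up call; the session_id value "2" is an arbitrary example and the actual response text will vary between runs.

# Hypothetical follow-up call: session_id "2" is not "1", so getSessionHistory
# returns an empty history and only the new question is sent to the model.
otherResponseAIMessage = runnableWithMessageHistory.invoke(
    [HumanMessage("what do you call a speechless parrot")],
    config = {"configurable" : {"session_id" : "2"}}
)

print(otherResponseAIMessage.content)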
▶ requirements.txt
annotated-types==0.7.0
anyio==4.7.0
certifi==2024.12.14
charset-normalizer==3.4.0
colorama==0.4.6
distro==1.9.0
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
idna==3.10
jiter==0.8.2
jsonpatch==1.33
jsonpointer==3.0.0
langchain-core==0.3.25
langchain-openai==0.2.12
langsmith==0.2.3
openai==1.57.4
orjson==3.10.12
packaging==24.2
pydantic==2.10.3
pydantic_core==2.27.1
python-dotenv==1.0.1
PyYAML==6.0.2
regex==2024.11.6
requests==2.32.3
requests-toolbelt==1.0.0
sniffio==1.3.1
tenacity==9.0.0
tiktoken==0.8.0
tqdm==4.67.1
typing_extensions==4.12.2
urllib3==2.2.3
※ The packages were installed with the pip install python-dotenv langchain-openai command.