■ CompiledStateGraph 클래스의 get_state_history 메소드를 사용해 상태 히스토리를 구하는 방법을 보여준다.
※ OPENAI_API_KEY 환경 변수 값은 .env 파일에 정의한다.
※ TAVILY_API_KEY 환경 변수 값은 .env 파일에 정의한다.
▶ main.py
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 |
"""Demonstrate replaying past state via CompiledStateGraph.get_state_history.

Builds a chatbot graph with a Tavily search tool and a human-escalation
node, runs two user turns, walks the checkpoint history, and resumes
execution ("time travel") from the checkpoint whose state holds exactly
six messages.

Environment: OPENAI_API_KEY and TAVILY_API_KEY must be defined in a .env file.
"""
from dotenv import load_dotenv
from typing import Annotated
from langchain_openai import ChatOpenAI
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import AIMessage
from langchain_core.messages import ToolMessage
from pydantic import BaseModel
from typing_extensions import TypedDict
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph
from langgraph.graph import START
from langgraph.graph import END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode
from langgraph.prebuilt import tools_condition

load_dotenv()

chat_model = ChatOpenAI(model="gpt-4o-mini")
tavily_search = TavilySearchResults(max_results=2)


class RequestAssistance(BaseModel):
    """Escalate the conversation to an expert. Use this if you are unable to assist directly or if the user requires support beyond your permissions.

    To use this function, relay the user's 'request' so the expert can provide the right guidance.
    """
    # The user's request, relayed verbatim so the expert has full context.
    request: str


tool_list = [tavily_search, RequestAssistance]
llm_with_tools = chat_model.bind_tools(tool_list)


class State(TypedDict):
    """Graph state: the running chat transcript plus an escalation flag."""
    # Conversation history; add_messages appends new messages instead of overwriting.
    messages: Annotated[list, add_messages]
    # True when the LLM asked to escalate to a human expert.
    askHuman: bool


def chat(state: State):
    """Chatbot node: invoke the LLM and detect a RequestAssistance tool call."""
    response = llm_with_tools.invoke(state["messages"])
    ask_human = bool(
        response.tool_calls
        and response.tool_calls[0]["name"] == RequestAssistance.__name__
    )
    return {"messages": [response], "askHuman": ask_human}


def create_tool_message(content: str, ai_message: AIMessage):
    """Build a ToolMessage that answers the first tool call of *ai_message*."""
    return ToolMessage(content=content, tool_call_id=ai_message.tool_calls[0]["id"])


def check_human(state: State):
    """Human node: make sure the pending tool call receives some ToolMessage reply."""
    new_message_list = []
    if not isinstance(state["messages"][-1], ToolMessage):
        # Typically the user will have updated the state during the interrupt.
        # If not, include a placeholder ToolMessage so the LLM can continue.
        new_message_list.append(
            create_tool_message("No response from human.", state["messages"][-1])
        )
    return {"messages": new_message_list, "askHuman": False}


def select_next_node(state: State):
    """Route from the chatbot: escalate to a human, run tools, or end."""
    if state["askHuman"]:
        return "human_node"
    # Fall back to the prebuilt router that checks for ordinary tool calls.
    return tools_condition(state)


state_graph = StateGraph(State)
state_graph.add_node("chatbot_node", chat)
state_graph.add_node("tools", ToolNode(tools=[tavily_search]))
state_graph.add_node("human_node", check_human)
state_graph.add_conditional_edges(
    "chatbot_node",
    select_next_node,
    {"human_node": "human_node", "tools": "tools", END: END},
)
state_graph.add_edge("tools", "chatbot_node")
state_graph.add_edge("human_node", "chatbot_node")
state_graph.add_edge(START, "chatbot_node")

memory_saver = MemorySaver()
compiled_state_graph = state_graph.compile(
    checkpointer=memory_saver,
    interrupt_before=["human_node"],
)

configurable_dictionary = {"configurable": {"thread_id": "1"}}

# First user turn: triggers a Tavily search followed by a summarizing answer.
for addable_updates_dict in compiled_state_graph.stream(
    {"messages": [("user", "I'm learning LangGraph. Could you do some research on it for me?")]},
    configurable_dictionary,
    stream_mode="values",
):
    if "messages" in addable_updates_dict:
        print(addable_updates_dict["messages"][-1])

print("-" * 50)

# Second user turn: a plain follow-up that needs no tool use.
for addable_updates_dict in compiled_state_graph.stream(
    {"messages": [("user", "Ya that's helpful. Maybe I'll build an autonomous agent with it!")]},
    configurable_dictionary,
    stream_mode="values",
):
    if "messages" in addable_updates_dict:
        print(addable_updates_dict["messages"][-1])

print("-" * 50)

state_to_replay = None

for state_snapshot in compiled_state_graph.get_state_history(configurable_dictionary):
    message_count = len(state_snapshot.values["messages"])
    # Single quotes inside the f-string keep this valid on Python < 3.12
    # (nested double quotes are only legal from 3.12 / PEP 701 onward).
    print(f"Message count : {message_count}, Next : {state_snapshot.next}")
    if message_count == 6:
        # We are somewhat arbitrarily selecting a specific state
        # based on the number of chat messages in the state.
        state_to_replay = state_snapshot

print("-" * 50)

# Fail loudly (instead of an opaque AttributeError below) if no matching
# checkpoint was produced by the two turns above.
if state_to_replay is None:
    raise RuntimeError("No checkpoint with exactly 6 messages was found to replay.")

# The checkpoint_id in state_to_replay.config identifies a state stored in the
# checkpointer; streaming with None as the input resumes from that checkpoint.
for addable_updates_dict in compiled_state_graph.stream(
    None,
    state_to_replay.config,
    stream_mode="values",
):
    if "messages" in addable_updates_dict:
        print(addable_updates_dict["messages"][-1])

print("-" * 50)

# Sample output from a real run (inert module-level string, kept for reference):
"""
content="I'm learning LangGraph. Could you do some research on it for me?" additional_kwargs={} response_metadata={} id='415a41dd-fd1f-4f41-ae89-618f8139f5f2'
content='' additional_kwargs={'tool_calls': [{'id': 'call_ADj6WXkV0LrMxEzCcY46RLa9', 'function': {'arguments': '{"query":"LangGraph"}', 'name': 'tavily_search_results_json'}, 'type': 'function'}], 'refusal': None} response_metadata={'token_usage': {'completion_tokens': 20, 'prompt_tokens': 157, 'total_tokens': 177, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_0aa8d3e20b', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-f645107d-250f-4596-bda4-817e2c65fbb8-0' tool_calls=[{'name': 'tavily_search_results_json', 'args': {'query': 'LangGraph'}, 'id': 'call_ADj6WXkV0LrMxEzCcY46RLa9', 'type': 'tool_call'}] usage_metadata={'input_tokens': 157, 'output_tokens': 20, 'total_tokens': 177, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}
content='[{"url": "https://langchain-ai.github.io/langgraph/", "content": "Overview¶ · LangGraph is a library for building stateful, multi-actor applications with LLMs, used to create agent and multi-agent workflows."}, {"url": "https://langchain-ai.github.io/langgraph/tutorials/introduction/", "content": "LangGraph is a library for building stateful, multi-actor applications with LLMs, used to create agent and multi-agent workflows. Compared to other LLM"}]' name='tavily_search_results_json' id='b6108c11-a863-40d8-8a5b-832af87d0260' tool_call_id='call_ADj6WXkV0LrMxEzCcY46RLa9' artifact={'query': 'LangGraph', 'follow_up_questions': None, 'answer': None, 'images': [], 'results': [{'url': 'https://langchain-ai.github.io/langgraph/', 'title': 'LangGraph', 'content': 'Overview¶ · LangGraph is a library for building stateful, multi-actor applications with LLMs, used to create agent and multi-agent workflows.', 'score': 0.93131906, 'raw_content': None}, {'url': 'https://langchain-ai.github.io/langgraph/tutorials/introduction/', 'title': 'LangGraph Quick Start - GitHub Pages', 'content': 'LangGraph is a library for building stateful, multi-actor applications with LLMs, used to create agent and multi-agent workflows. Compared to other LLM', 'score': 0.915091, 'raw_content': None}], 'response_time': 2.0}
content='LangGraph is a library designed for building stateful, multi-actor applications using Large Language Models (LLMs). It focuses on creating agent and multi-agent workflows, allowing developers to leverage the capabilities of LLMs in a structured manner.\n\nFor more detailed information, you can check out the following resources:\n- [LangGraph Overview](https://langchain-ai.github.io/langgraph/)\n- [LangGraph Introduction Tutorial](https://langchain-ai.github.io/langgraph/tutorials/introduction/) \n\nThese resources provide insights into its features, use cases, and how to get started with LangGraph. If you have specific questions or need further assistance, feel free to ask!' additional_kwargs={'refusal': None} response_metadata={'token_usage': {'completion_tokens': 136, 'prompt_tokens': 296, 'total_tokens': 432, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_0aa8d3e20b', 'finish_reason': 'stop', 'logprobs': None} id='run-c470584c-8118-478f-abda-26375a0565e8-0' usage_metadata={'input_tokens': 296, 'output_tokens': 136, 'total_tokens': 432, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}
--------------------------------------------------
content="Ya that's helpful. Maybe I'll build an autonomous agent with it!" additional_kwargs={} response_metadata={} id='eea3bf16-557d-4efb-a101-c297ff5c0b2e'
content='That sounds like an exciting project! Building an autonomous agent with LangGraph can open up many possibilities for automation and intelligent workflows. If you have any questions about the development process, specific functionalities, or need tips and resources as you work on your project, feel free to reach out. Good luck with your autonomous agent!' additional_kwargs={'refusal': None} response_metadata={'token_usage': {'completion_tokens': 65, 'prompt_tokens': 451, 'total_tokens': 516, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_0aa8d3e20b', 'finish_reason': 'stop', 'logprobs': None} id='run-e56dd90c-ac11-4ee0-85a2-c86a732d5778-0' usage_metadata={'input_tokens': 451, 'output_tokens': 65, 'total_tokens': 516, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}
--------------------------------------------------
Message count : 6, Next : ()
Message count : 5, Next : ('chatbot_node',)
Message count : 4, Next : ('__start__',)
Message count : 4, Next : ()
Message count : 3, Next : ('chatbot_node',)
Message count : 2, Next : ('tools',)
Message count : 1, Next : ('chatbot_node',)
Message count : 0, Next : ('__start__',)
--------------------------------------------------
content='That sounds like an exciting project! Building an autonomous agent with LangGraph can open up many possibilities for automation and intelligent workflows. If you have any questions about the development process, specific functionalities, or need tips and resources as you work on your project, feel free to reach out. Good luck with your autonomous agent!' additional_kwargs={'refusal': None} response_metadata={'token_usage': {'completion_tokens': 65, 'prompt_tokens': 451, 'total_tokens': 516, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_0aa8d3e20b', 'finish_reason': 'stop', 'logprobs': None} id='run-e56dd90c-ac11-4ee0-85a2-c86a732d5778-0' usage_metadata={'input_tokens': 451, 'output_tokens': 65, 'total_tokens': 516, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}
--------------------------------------------------
"""
▶ requirements.txt
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 |
aiohappyeyeballs==2.4.4
aiohttp==3.11.11
aiosignal==1.3.2
annotated-types==0.7.0
anyio==4.7.0
attrs==24.3.0
certifi==2024.12.14
charset-normalizer==3.4.1
colorama==0.4.6
dataclasses-json==0.6.7
distro==1.9.0
frozenlist==1.5.0
greenlet==3.1.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
httpx-sse==0.4.0
idna==3.10
jiter==0.8.2
jsonpatch==1.33
jsonpointer==3.0.0
langchain==0.3.13
langchain-community==0.3.13
langchain-core==0.3.28
langchain-openai==0.2.14
langchain-text-splitters==0.3.4
langgraph==0.2.60
langgraph-checkpoint==2.0.9
langgraph-sdk==0.1.48
langsmith==0.2.7
marshmallow==3.23.2
msgpack==1.1.0
multidict==6.1.0
mypy-extensions==1.0.0
numpy==2.2.1
openai==1.58.1
orjson==3.10.13
packaging==24.2
propcache==0.2.1
pydantic==2.10.4
pydantic-settings==2.7.1
pydantic_core==2.27.2
python-dotenv==1.0.1
PyYAML==6.0.2
regex==2024.11.6
requests==2.32.3
requests-toolbelt==1.0.0
sniffio==1.3.1
SQLAlchemy==2.0.36
tenacity==9.0.0
tiktoken==0.8.0
tqdm==4.67.1
typing-inspect==0.9.0
typing_extensions==4.12.2
urllib3==2.3.0
yarl==1.18.3
※ pip install python-dotenv langchain-community langchain-openai langgraph 명령을 실행했다.