■ This example shows how to register a list of callback handlers by passing the callbacks argument to the with_config method of the RunnableSequence class.
▶ main.py
from dotenv import load_dotenv
from langchain_core.callbacks import BaseCallbackHandler
from typing import Dict
from typing import Any
from typing import List
from langchain_core.messages import BaseMessage
from langchain_core.outputs import LLMResult
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

load_dotenv()

# Custom callback handler that prints chain and chat-model lifecycle events.
class CustomCallbackHandler(BaseCallbackHandler):
    def on_chat_model_start(self, serializedDictionary : Dict[str, Any], messageListList : List[List[BaseMessage]], **keywordArgumentDictionary) -> None:
        print("on_chat_model_start")
        print()

    def on_llm_end(self, result : LLMResult, **keywordArgumentDictionary) -> None:
        print(f"on_llm_end : {result}")
        print()

    def on_chain_start(self, serializedDictionary : Dict[str, Any], inputDictionary : Dict[str, Any], **keywordArgumentDictionary) -> None:
        if serializedDictionary:
            print(f"on_chain_start : {serializedDictionary.get('name')}")
        else:
            print(f"on_chain_start : {serializedDictionary}")
        print()

    def on_chain_end(self, outputDictionary : Dict[str, Any], **keywordArgumentDictionary) -> None:
        print(f"on_chain_end : {outputDictionary}")
        print()

chatPromptTemplate = ChatPromptTemplate.from_template("What is 1 + {number}?")
chatOpenAI = ChatOpenAI(model = "gpt-4o")
runnableSequence = chatPromptTemplate | chatOpenAI

# Bind the callback handler list to the sequence; the handlers fire on every invocation.
callBackHandlerList = [CustomCallbackHandler()]
runnableBinding = runnableSequence.with_config(callbacks = callBackHandlerList)

responseAIMessage = runnableBinding.invoke({"number" : "2"})
print(responseAIMessage)

"""
on_chain_start : None

on_chain_start : ChatPromptTemplate

on_chain_end : messages=[HumanMessage(content='What is 1 + 2?', additional_kwargs={}, response_metadata={})]

on_chat_model_start

on_llm_end : generations=[[ChatGeneration(text='1 + 2 equals 3.', generation_info={'finish_reason': 'stop', 'logprobs': None}, message=AIMessage(content='1 + 2 equals 3.', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 8, 'prompt_tokens': 15, 'total_tokens': 23, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-2024-08-06', 'system_fingerprint': 'fp_831e067d82', 'finish_reason': 'stop', 'logprobs': None}, id='run-a0d0dff3-4469-49a3-96ad-41ed409bf28d-0', usage_metadata={'input_tokens': 15, 'output_tokens': 8, 'total_tokens': 23, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}))]] llm_output={'token_usage': {'completion_tokens': 8, 'prompt_tokens': 15, 'total_tokens': 23, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-2024-08-06', 'system_fingerprint': 'fp_831e067d82'} run=None type='LLMResult'

on_chain_end : content='1 + 2 equals 3.' additional_kwargs={'refusal': None} response_metadata={'token_usage': {'completion_tokens': 8, 'prompt_tokens': 15, 'total_tokens': 23, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-2024-08-06', 'system_fingerprint': 'fp_831e067d82', 'finish_reason': 'stop', 'logprobs': None} id='run-a0d0dff3-4469-49a3-96ad-41ed409bf28d-0' usage_metadata={'input_tokens': 15, 'output_tokens': 8, 'total_tokens': 23, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}

content='1 + 2 equals 3.' additional_kwargs={'refusal': None} response_metadata={'token_usage': {'completion_tokens': 8, 'prompt_tokens': 15, 'total_tokens': 23, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-2024-08-06', 'system_fingerprint': 'fp_831e067d82', 'finish_reason': 'stop', 'logprobs': None} id='run-a0d0dff3-4469-49a3-96ad-41ed409bf28d-0' usage_metadata={'input_tokens': 15, 'output_tokens': 8, 'total_tokens': 23, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}}
"""
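※ For reference, callback handlers can also be supplied per call through the config parameter of invoke instead of being bound in advance with with_config. The sketch below is not part of the original listing; it reuses the CustomCallbackHandler, runnableSequence, and callBackHandlerList defined above, and the handlers then apply only to this single invocation.

# Assumed variation (not in the original listing): pass the callback handler
# list only for this one call via the config parameter of invoke.
responseAIMessage = runnableSequence.invoke(
    {"number" : "2"},
    config = {"callbacks" : callBackHandlerList}
)
print(responseAIMessage)

Binding with with_config is convenient when the handlers should fire for every invocation of the sequence, while the per-call config keeps them scoped to a single request.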
▶ requirements.txt
aiohappyeyeballs==2.4.3
aiohttp==3.11.6
aiosignal==1.3.1
annotated-types==0.7.0
anyio==4.6.2.post1
attrs==24.2.0
certifi==2024.8.30
charset-normalizer==3.4.0
colorama==0.4.6
distro==1.9.0
frozenlist==1.5.0
greenlet==3.1.1
h11==0.14.0
httpcore==1.0.7
httpx==0.27.2
idna==3.10
jiter==0.7.1
jsonpatch==1.33
jsonpointer==3.0.0
langchain==0.3.7
langchain-core==0.3.19
langchain-openai==0.2.9
langchain-text-splitters==0.3.2
langsmith==0.1.143
multidict==6.1.0
numpy==1.26.4
openai==1.54.5
orjson==3.10.11
packaging==24.2
propcache==0.2.0
pydantic==2.9.2
pydantic_core==2.23.4
python-dotenv==1.0.1
PyYAML==6.0.2
regex==2024.11.6
requests==2.32.3
requests-toolbelt==1.0.0
sniffio==1.3.1
SQLAlchemy==2.0.36
tenacity==9.0.0
tiktoken==0.8.0
tqdm==4.67.0
typing_extensions==4.12.2
urllib3==2.2.3
yarl==1.17.2
※ The packages were installed by running the command pip install python-dotenv langchain langchain-openai.