■ ChatPromptTemplate 클래스의 from_messages 클래스 메소드에서 출력 구조화를 위해 예제를 명시적 도구 호출로 전달하는 방법을 보여준다.
▶ main.py
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 |
"""Demonstrate steering structured output with few-shot examples.

Few-shot examples are passed to the model as an explicit tool-call
transcript (AIMessage.tool_calls followed by ToolMessage), then combined
with a system prompt via ChatPromptTemplate.from_messages and piped into
a structured-output model.
"""
import os
from typing import Optional, Union

from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI

# setdefault so a real key already present in the environment is not
# clobbered by this placeholder.
os.environ.setdefault("OPENAI_API_KEY", "<OPENAI_API_KEY>")


class Joke(BaseModel):
    """Joke to tell user."""

    setup: str = Field(description="The setup of the joke")
    punchline: str = Field(description="The punchline to the joke")
    rating: Optional[int] = Field(description="How funny the joke is, from 1 to 10")


class ConversationalResponse(BaseModel):
    """Respond in a conversational manner. Be kind and helpful."""

    response: str = Field(description="A conversational response to the user's query")


class Response(BaseModel):
    # The model picks whichever variant fits the user's input.
    output: Union[Joke, ConversationalResponse]


chat_model = ChatOpenAI(model="gpt-3.5-turbo-0125")
structured_llm = chat_model.with_structured_output(Response)

# Few-shot examples encoded as an explicit tool-call transcript.
# NOTE(review): these tool_calls use the name "joke", while
# with_structured_output(Response) binds a tool derived from the Response
# schema — confirm the example tool name matches what the model is given.
example_messages = [
    HumanMessage("Tell me a joke about planes", name="example_user"),
    AIMessage(
        "",
        name="example_assistant",
        tool_calls=[
            {
                "name": "joke",
                "args": {
                    "setup": "Why don't planes ever get tired?",
                    "punchline": "Because they have rest wings!",
                    "rating": 2,
                },
                "id": "1",
            }
        ],
    ),
    # Most tool-calling models expect a ToolMessage to follow an AIMessage
    # that contains tool calls.
    ToolMessage("", tool_call_id="1"),
    # Some models expect an AIMessage to follow the ToolMessage, so an
    # AIMessage may need to be added here as well.
    HumanMessage("Tell me another joke about planes", name="example_user"),
    AIMessage(
        "",
        name="example_assistant",
        tool_calls=[
            {
                "name": "joke",
                "args": {
                    "setup": "Cargo",
                    "punchline": "Cargo 'vroom vroom', but planes go 'zoom zoom'!",
                    "rating": 10,
                },
                "id": "2",
            }
        ],
    ),
    ToolMessage("", tool_call_id="2"),
    HumanMessage("Now about caterpillars", name="example_user"),
    AIMessage(
        "",
        tool_calls=[
            {
                "name": "joke",
                "args": {
                    "setup": "Caterpillar",
                    "punchline": "Caterpillar really slow, but watch me turn into a butterfly and steal the show!",
                    "rating": 5,
                },
                "id": "3",
            }
        ],
    ),
    ToolMessage("", tool_call_id="3"),
]

system_message = """You are a hilarious comedian. Your specialty is knock-knock jokes. \
Return a joke which has the setup (the response to "Who's there?") \
and the final punchline (the response to "<setup> who?")."""

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_message),
        # "placeholder" expands the examples list in-line at this position.
        ("placeholder", "{examples}"),
        ("human", "{input}"),
    ]
)

few_shot_chain = prompt | structured_llm

response = few_shot_chain.invoke(
    {"input": "crocodiles", "examples": example_messages}
)
print(response)

"""
output=ConversationalResponse(response='I mainly specialize in knock-knock jokes, but I can try to come up with a crocodile joke for you! Why did the crocodile go to the dentist? To get a new toothbrush!')
"""
▶ requirements.txt
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 |
annotated-types==0.7.0 anyio==4.4.0 certifi==2024.6.2 charset-normalizer==3.3.2 distro==1.9.0 exceptiongroup==1.2.1 h11==0.14.0 httpcore==1.0.5 httpx==0.27.0 idna==3.7 jsonpatch==1.33 jsonpointer==3.0.0 langchain-core==0.2.7 langchain-openai==0.1.8 langsmith==0.1.77 openai==1.34.0 orjson==3.10.5 packaging==24.1 pydantic==2.7.4 pydantic_core==2.18.4 PyYAML==6.0.1 regex==2024.5.15 requests==2.32.3 sniffio==1.3.1 tenacity==8.3.0 tiktoken==0.7.0 tqdm==4.66.4 typing_extensions==4.12.2 urllib3==2.2.1 |
※ pip install langchain-openai 명령을 실행했다.