■ set_llm_cache 함수를 사용해 SQLiteCache 객체를 캐시로 설정하는 방법을 보여준다.
※ OPENAI_API_KEY 환경 변수 값은 .env 파일에 정의한다.
▶ main.py
from dotenv import load_dotenv
from langchain.globals import set_llm_cache
from langchain_community.cache import SQLiteCache
from langchain_openai import ChatOpenAI

# Load environment variables (OPENAI_API_KEY) from the .env file.
load_dotenv()

# Register a SQLite-backed cache as the global LLM cache: responses are
# persisted to cache.db, so an identical prompt is answered from disk
# instead of calling the OpenAI API again.
set_llm_cache(SQLiteCache(database_path="cache.db"))

chat_model = ChatOpenAI(model="gpt-3.5-turbo-0125")

# First invocation: goes to the API and stores the response in cache.db.
response_first = chat_model.invoke("고양이 울음소리는?")
print(response_first)
""" content='"야옹"이라고 합니다.' response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 18, 'total_tokens': 29}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None} id='run-70cc38f8-64a1-43a0-b7a5-6e0b7dce0be9-0' usage_metadata={'input_tokens': 18, 'output_tokens': 11, 'total_tokens': 29} """

# Second invocation with the SAME prompt: served from the SQLite cache
# (note the identical run id and token counts in the sample output).
response_cached = chat_model.invoke("고양이 울음소리는?")
print(response_cached)
""" content='"야옹"이라고 합니다.' response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 18, 'total_tokens': 29}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None} id='run-70cc38f8-64a1-43a0-b7a5-6e0b7dce0be9-0' usage_metadata={'input_tokens': 18, 'output_tokens': 11, 'total_tokens': 29} """

# Different prompt: a cache miss, so the API is called again and a new
# entry is written to cache.db.
response_other = chat_model.invoke("까마귀 울음소리는?")
print(response_other)
""" content='까악 까악!' response_metadata={'token_usage': {'completion_tokens': 10, 'prompt_tokens': 20, 'total_tokens': 30}, 'model_name': 'gpt-3.5-turbo-0125', 'system_fingerprint': None, 'finish_reason': 'stop', 'logprobs': None} id='run-61db093c-e5d5-462f-b6a8-702eadcb522c-0' usage_metadata={'input_tokens': 20, 'output_tokens': 10, 'total_tokens': 30} """
▶ requirements.txt
(원문 서적의 requirements.txt 내용 3줄 — python-dotenv, langchain, langchain-openai 등 — 이 추출 과정에서 누락되었다.)