参照: OPRO paper
Automatic Prompt Engineer (APE) – Nextra
LLMが巡回セールスマン問題などの最適化問題を解く〜自分自身で優れたプロンプトを作成&活用〜 | AIDB
訓練データ (x, y) を使用して、入力 x に対して出力 y を返すようなプロンプトを LLM 自身に生成させる。
from typing import List
import os
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
from dotenv import load_dotenv
# Load OPENAI_* settings from .env; override=True lets .env values win over pre-set env vars.
load_dotenv('.env', override=True)
class AutomaticPromptEngineer:
    """Automatically search for a prompt that maps each training input to its output.

    Simple APE-style loop: for each (x, y) training pair, ask the LLM to write a
    candidate prompt containing the ``<|input|>`` placeholder, evaluate whether
    substituting x reproduces y, and feed the last successful / failed candidate
    back into the next generation request as hints.
    """

    def __init__(self, x_train: List[str], y_train: List[str], iteration: int):
        """Store training pairs and build the chat model client.

        Args:
            x_train: Example inputs.
            y_train: Expected outputs, aligned index-by-index with ``x_train``.
            iteration: Number of full passes over the training data in ``optimize``.

        Raises:
            ValueError: If ``x_train`` and ``y_train`` differ in length.
        """
        if len(x_train) != len(y_train):
            raise ValueError("x_train and y_train must have the same length.")
        self.x_train = x_train
        self.y_train = y_train
        self.iteration = iteration
        # temperature=0 keeps generations deterministic so evaluation is reproducible.
        # NOTE(review): both model_name and engine read the same env var — looks
        # Azure-specific; confirm against the deployment configuration.
        self.llm = ChatOpenAI(model_name=os.environ["OPENAI_CHAT_DEPLOY_NAME"],
                              engine=os.environ["OPENAI_CHAT_DEPLOY_NAME"],
                              temperature=0)
        self.last_succeed_prompt = ""
        self.last_failed_prompt = ""

    def _generate_prompt(self, x: str, y: str) -> str:
        """Ask the LLM to propose a prompt that turns input ``x`` into output ``y``.

        The previous successful and failed candidates (empty strings initially)
        are included as improvement hints.
        """
        # Real newlines here (the original emitted the two characters "\n"
        # literally into the meta-prompt, which hurts readability for the LLM).
        input_and_output = f"Input: {x}\nOutput: {y}\n\n"
        last_succeed_prompt = f"Last Succeed Prompt: {self.last_succeed_prompt}\n\n"
        last_failed_prompt = f"Last Failed Prompt: {self.last_failed_prompt}\n\n"
        template = f"""
You are an engineer who is making prompt for Large Language Models.
You should make a prompt for LLM that takes the following input and just outputs the following output.
The last successful and failed prompts are also exemplified if available. Please use them as hints for improvement.
The placeholder that accepts input is <|input|>.
Let's think about the prompt that can make LLM output the following output from the following input.
# Example Input and Output
{input_and_output}
{last_succeed_prompt}
{last_failed_prompt}
YOUR OUTPUT:"""
        res = self.llm([HumanMessage(content=template)]).content
        print(res)
        return res

    def evaluate_prompt(self, prompt: str, x: str, y: str) -> bool:
        """Return True iff ``prompt`` applied to ``x`` reproduces ``y``.

        Comparison ignores leading/trailing whitespace: chat models frequently
        append a trailing newline, which the original exact-equality check
        wrongly counted as a failure.
        """
        res = self.llm([HumanMessage(content=prompt.replace("<|input|>", x))]).content
        return res.strip() == y.strip()

    def optimize(self) -> None:
        """Run ``iteration`` passes over the training pairs, updating the hint prompts."""
        for _ in range(self.iteration):
            for x, y in zip(self.x_train, self.y_train):
                prompt = self._generate_prompt(x, y)
                if self.evaluate_prompt(prompt, x, y):
                    self.last_succeed_prompt = prompt
                else:
                    self.last_failed_prompt = prompt

    def predict(self, x: str) -> str:
        """Apply the best discovered prompt to a new input ``x``.

        Raises:
            RuntimeError: If no candidate prompt ever succeeded (the original
                silently sent an empty prompt to the LLM in that case).
        """
        if not self.last_succeed_prompt:
            raise RuntimeError("No successful prompt available; call optimize() first.")
        return self.llm([HumanMessage(content=self.last_succeed_prompt.replace("<|input|>", x))]).content
# Toy task: learn a prompt that reverses a string.
x_train = ["abc", "def", "ghi"]
y_train = ["cba", "fed", "ihg"]
# Held-out examples; x_test/y_test are defined but never used below — kept for manual checks.
x_test = ["foo","bar","baz"]
y_test = ["oof","rab","zab"]
# NOTE(review): this runs at import time and makes live OpenAI API calls;
# consider wrapping in `if __name__ == "__main__":` — confirm with the author.
engineer = AutomaticPromptEngineer(x_train, y_train, 5)
engineer.optimize()
print(engineer.predict("foo"))
実行によって作成されたプロンプトの例:
Input: Reverse the order of characters in the string <|input|>
Output: ''.join(reversed(<|input|>))