Hi Justin! I'm using the helicone proxy gateway to make my OAI API queries. Here are the relevant parts of the code:
# OpenAI client routed through the Helicone proxy gateway.
# BUG FIX: the Helicone-* headers below are only honored when requests go
# through Helicone's gateway. With base_url pointed at api.openai.com the
# headers are silently ignored (no request logging, no response caching),
# so the proxy base_url is restored here.
gpt_client = openai.OpenAI(
    organization=os.environ.get("KIMLAB_OAI_ID"),
    api_key=os.environ.get("OPENAI_API_KEY"),
    base_url="https://oai.helicone.ai/v1",  # Helicone proxy gateway endpoint
    default_headers={
        # Authenticates this project to Helicone.
        "Helicone-Auth": f"Bearer {os.environ.get('HELICONE_API_KEY')}",
        # Serve identical requests from Helicone's cache instead of re-querying OpenAI.
        "Helicone-Cache-Enabled": "true",
    },
)
^ This instantiates the client for self.client below
def query_llm(self, llm_prompt):
    """Send *llm_prompt* to the chat-completions endpoint and return the
    model's reply with every newline flattened to a single space.

    Uses ``self.client`` (OpenAI-compatible client), ``self.model`` and
    ``self.sys_prompt`` configured on the instance.
    """
    chat_messages = [
        {"role": "system", "content": self.sys_prompt},
        {"role": "user", "content": llm_prompt},
    ]
    completion = self.client.chat.completions.create(
        model=self.model,
        messages=chat_messages,
    )
    # Flatten the reply onto one line so it fits a single DataFrame cell.
    answer = completion.choices[0].message.content
    return answer.replace("\n", " ")
^ This is the function being called below
tests["llm_answer"] = tests["question"].apply(self.query_llm)