import { Configuration, OpenAIApi } from "openai"; const configuration = new Configuration({ apiKey: process.env.AZURE_API_KEY, basePath: "https://oai.hconeai.com/v1", baseOptions: { headers: { "Helicone-Auth": `Bearer ${process.env.HELICONE_API_KEY}`, "Helicone-OpenAI-Api-Base": `https://XXXX.openai.azure.com`, } } }); const openai = new OpenAIApi(configuration);
import { Configuration, OpenAIApi } from "azure-openai"; const openai = new OpenAIApi( new Configuration({ apiKey: process.env.AZURE_API_KEY, azure: { apiKey: process.env.AZURE_API_KEY, endpoint: process.env.AZURE_URL, deploymentName: 'XXXXXXX', } }), );
for the 3.5 models, but I have tried making requests with both 3.5 and 4, and both fail.
https://learn.microsoft.com/en-us/azure/ai-services/openai/reference
The endpoints look like: https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/completions?api-version=2023-05-15
But our current logic strips the entire ending and forces it to be:
https://YOUR_RESOURCE_NAME.openai.azure.com/v1/chat/completions
This causes a 404. The Azure endpoints require this URL structure, and the model is not even in the request body. Luckily, with Scott's recent change, the model name will be retrieved from the response first.
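To make the needed behavior concrete, here is a minimal sketch (not the actual worker code; the function name and shape are made up for illustration) of the rewrite: keep the incoming /openai/deployments/... path and the api-version query string, and point them at the Azure domain taken from the Helicone-OpenAI-Api-Base header, instead of forcing /v1/chat/completions.

// Hypothetical helper, not Helicone's implementation.
function buildAzureTargetUrl(incomingUrl: string, azureApiBase: string): string {
  const incoming = new URL(incomingUrl);
  // Keep the deployment path, e.g. /openai/deployments/YOUR_DEPLOYMENT_NAME/completions
  const target = new URL(incoming.pathname, azureApiBase);
  // Keep the query string, e.g. ?api-version=2023-05-15
  target.search = incoming.search;
  return target.toString();
}

// buildAzureTargetUrl(
//   "https://oai.hconeai.com/openai/deployments/my-deployment/completions?api-version=2023-05-15",
//   "https://my-resource.openai.azure.com"
// )
// -> "https://my-resource.openai.azure.com/openai/deployments/my-deployment/completions?api-version=2023-05-15"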
const configuration = new Configuration({
  apiKey: "AZURE_API_KEY",
  basePath: "https://oai.hconeai.com/openai/deployments/YOUR_DEPLOYMENT_NAME",
  baseOptions: {
    headers: {
      "Helicone-Auth": `Bearer HELICONE_API_KEY`,
      "api-key": "AZURE_API_KEY",
      "Helicone-OpenAI-Api-Base": "https://your_resource_name.openai.azure.com",
    },
    params: {
      "api-version": "API_VERSION",
    },
  },
});
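A quick usage sketch with that configuration (the deployment/model names here are placeholder assumptions; with Azure the deployment in the basePath decides what actually runs):

const openai = new OpenAIApi(configuration);

const chat = await openai.createChatCompletion({
  // With Azure the deployment in the basePath selects the model; this field is
  // mostly informational, and per the change above the model name is read from
  // the response.
  model: "gpt-35-turbo",
  messages: [{ role: "user", content: "Say hello through the proxy" }],
});

console.log(chat.data.choices[0].message?.content);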
Azure requests show "Invalid Prompt" in the prompt view, but the pure JSON tab works. This is likely due to a FE mapping that needs to be updated.
The fix has now been merged. Everything should work now! https://github.com/Helicone/helicone/pull/707

Resource not found
Everything looks to be set correctly....

The basePath should point at the proxy plus the deployment path, so /openai/deployments/[DEPLOYMENT], and the Azure domain goes in the Helicone-OpenAI-Api-Base header, which should look like https://[AZURE_DOMAIN].openai.azure.com. It will then add the params to the end as query params, with the final URL shaped like https://[AZURE_DOMAIN].openai.azure.com/openai/deployments/[DEPLOYMENT]/completions?api-version=[API_VERSION]. For example:

const configuration = new Configuration({
  basePath: "https://oai.hconeai.com/openai/deployments/text-davinci-003",
  baseOptions: {
    headers: {
      "Helicone-Auth": `Bearer <>`,
      "Helicone-OpenAI-API-Base": "https://<>.openai.azure.com",
      "api-key": "<>",
    },
  },
});

const openai = new OpenAIApi(configuration);

const response = await openai.createCompletion({
  prompt: "This is a test",
  model: "text-davinci-003",
});

console.log("configuration", response);
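To make the resulting request shape explicit, the same call can be written as a raw fetch (a sketch only; the api-version value and the placeholder names are assumptions):

const url =
  "https://oai.hconeai.com/openai/deployments/text-davinci-003/completions" +
  "?api-version=2023-05-15"; // api-version value is an assumption

const res = await fetch(url, {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    "Helicone-Auth": "Bearer <HELICONE_API_KEY>",
    "Helicone-OpenAI-Api-Base": "https://<resource>.openai.azure.com",
    "api-key": "<AZURE_API_KEY>",
  },
  body: JSON.stringify({ prompt: "This is a test" }),
});

// The proxy forwards this to:
// https://<resource>.openai.azure.com/openai/deployments/text-davinci-003/completions?api-version=2023-05-15
console.log(await res.json());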
const configuration = new Configuration({
  basePath: "https://oai.hconeai.com/openai/deployments/text-davinci-003",
  baseOptions: {
    headers: {
      "Helicone-Auth": `Bearer <>`,
      "Helicone-OpenAI-API-Base": "https://<>.openai.azure.com",
      "api-key": "<>",
    },
    URLSearchParams: {
      "api-version": "<>",
    },
  },
});
const configuration = new Configuration({
  apiKey: "AZURE_API_KEY",
  basePath: "https://oai.hconeai.com/openai/deployments/YOUR_DEPLOYMENT_NAME",
  baseOptions: {
    headers: {
      "Helicone-Auth": `Bearer HELICONE_API_KEY`,
      "api-key": "AZURE_API_KEY",
      "Helicone-OpenAI-Api-Base": "https://your_resource_name.openai.azure.com",
    },
    params: {
      "api-version": "API_VERSION",
    },
  },
});