Skip to main content
POST
/
openai
/
deployments
/
{deployment-id}
/
responses
Azure OpenAI Compatible Responses
curl --request POST \
  --url http://localhost:8080/openai/deployments/{deployment-id}/responses \
  --header 'Content-Type: application/json' \
  --data '{
  "model": "<string>",
  "input": "<string>",
  "fallbacks": [
    "<string>"
  ],
  "stream": true,
  "background": true,
  "conversation": "<string>",
  "include": [
    "<string>"
  ],
  "instructions": "<string>",
  "max_output_tokens": 123,
  "max_tool_calls": 123,
  "metadata": {},
  "parallel_tool_calls": true,
  "previous_response_id": "<string>",
  "prompt_cache_key": "<string>",
  "reasoning": {
    "effort": "<string>"
  },
  "safety_identifier": "<string>",
  "service_tier": "<string>",
  "stream_options": {
    "include_usage": true
  },
  "store": true,
  "temperature": 123,
  "text": {
    "max_tokens": 123
  },
  "top_logprobs": 123,
  "top_p": 123,
  "tool_choice": "auto",
  "tools": [
    {
      "type": "function",
      "function": {
        "name": "<string>",
        "arguments": {}
      }
    }
  ],
  "truncation": "<string>"
}'
{
  "id": "chatcmpl-123",
  "object": "chat.completion",
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "user",
        "content": "Hello, how are you?",
        "tool_call_id": "<string>",
        "tool_calls": [
          {
            "id": "tool_123",
            "type": "function",
            "function": {
              "name": "get_weather",
              "arguments": "{\"location\": \"San Francisco, CA\"}"
            }
          }
        ],
        "refusal": "<string>",
        "annotations": [
          {
            "type": "<string>",
            "url_citation": {
              "start_index": 123,
              "end_index": 123,
              "title": "<string>",
              "url": "<string>",
              "sources": "<any>",
              "type": "<string>"
            }
          }
        ],
        "thought": "<string>"
      },
      "finish_reason": "stop",
      "stop": "<string>",
      "log_probs": {
        "content": [
          {
            "bytes": [
              123
            ],
            "logprob": -0.123,
            "token": "hello",
            "top_logprobs": [
              {
                "bytes": [
                  123
                ],
                "logprob": -0.456,
                "token": "world"
              }
            ]
          }
        ],
        "refusal": [
          {
            "bytes": [
              123
            ],
            "logprob": -0.456,
            "token": "world"
          }
        ]
      }
    }
  ],
  "data": [
    {
      "index": 123,
      "object": "<string>",
      "embedding": [
        123
      ]
    }
  ],
  "speech": {
    "usage": {
      "characters": 123
    },
    "audio": "aSDinaTvuI8gbWludGxpZnk="
  },
  "transcribe": {
    "text": "<string>",
    "logprobs": [
      {
        "token": "<string>",
        "log_prob": 123
      }
    ],
    "usage": {
      "prompt_tokens": 123,
      "completion_tokens": 123,
      "total_tokens": 123
    }
  },
  "messages": [
    {
      "role": "user",
      "content": "<string>"
    }
  ],
  "conversation_id": "<string>",
  "finish_reason": "<string>",
  "stop_reason": "<string>",
  "stop_sequence": "<string>",
  "prompt_cache": {
    "status": "<string>"
  },
  "model": "gpt-4o",
  "created": 1677652288,
  "service_tier": "<string>",
  "system_fingerprint": "<string>",
  "usage": {
    "prompt_tokens": 56,
    "completion_tokens": 31,
    "total_tokens": 87,
    "completion_tokens_details": {
      "reasoning_tokens": 123,
      "audio_tokens": 123,
      "accepted_prediction_tokens": 123,
      "rejected_prediction_tokens": 123
    }
  },
  "extra_fields": {
    "provider": "openai",
    "request_type": "list_models",
    "model_requested": "<string>",
    "model_params": {
      "temperature": 0.7,
      "top_p": 0.9,
      "top_k": 40,
      "max_tokens": 1000,
      "stop_sequences": [
        "\n\n",
        "END"
      ],
      "presence_penalty": 0,
      "frequency_penalty": 0,
      "tools": [
        {
          "id": "<string>",
          "type": "function",
          "function": {
            "name": "get_weather",
            "description": "Get current weather for a location",
            "parameters": {
              "type": "object",
              "description": "<string>",
              "properties": {},
              "required": [
                "<string>"
              ],
              "enum": [
                "<string>"
              ]
            }
          }
        }
      ],
      "tool_choice": {
        "type": "auto",
        "function": {
          "name": "get_weather"
        }
      },
      "parallel_tool_calls": true
    },
    "latency": 1234,
    "billed_usage": {
      "prompt_tokens": 123,
      "completion_tokens": 123,
      "search_units": 123,
      "classifications": 123
    },
    "raw_response": {}
  }
}

Path Parameters

deployment-id
string
required

Azure deployment ID

Body

application/json
model
string
required

Model identifier in 'provider/model' format

input
required

Simple text input for the response

fallbacks
string[]
stream
boolean
background
boolean
conversation
string
include
string[]
instructions
string
max_output_tokens
integer
max_tool_calls
integer
metadata
object
parallel_tool_calls
boolean
previous_response_id
string
prompt_cache_key
string
reasoning
object
safety_identifier
string
service_tier
string
stream_options
object
store
boolean
temperature
number
text
object
top_logprobs
integer
top_p
number
tool_choice
Available options:
auto,
any,
none,
required,
tool
tools
object[]
truncation
string

Response

Azure OpenAI-compatible response body for the Responses API

id
string

Unique response identifier

Example:

"chatcmpl-123"

object
enum<string>

Response type

Available options:
text.completion,
chat.completion,
embedding,
speech,
transcribe,
responses.completion
Example:

"chat.completion"

choices
object[]

Array of completion choices for chat and text completions. Not present for responses type.

data
object[]

Array of embedding objects

speech
object
transcribe
object
messages
object[]

Array of messages for responses type.

conversation_id
string

The conversation ID.

finish_reason
string

The reason the model stopped generating tokens.

stop_reason
string

The reason the model stopped generating tokens.

stop_sequence
string

The stop sequence that was generated.

prompt_cache
object
model
string

Model used for generation

Example:

"gpt-4o"

created
integer

Unix timestamp of creation

Example:

1677652288

service_tier
string

Service tier used

system_fingerprint
string

System fingerprint

usage
object
extra_fields
object