Skip to main content
POST /openai/v1/responses/input_tokens
Count input tokens
curl --request POST \
  --url http://localhost:8080/openai/v1/responses/input_tokens \
  --header 'Content-Type: application/json' \
  --data '
{
  "model": "gpt-4",
  "input": "<string>",
  "stream": true,
  "instructions": "<string>",
  "max_output_tokens": 123,
  "metadata": {},
  "parallel_tool_calls": true,
  "previous_response_id": "<string>",
  "reasoning": {
    "effort": "none",
    "generate_summary": "auto",
    "summary": "auto",
    "max_tokens": 123
  },
  "store": true,
  "temperature": 1,
  "text": {
    "format": {
      "type": "text",
      "json_schema": {
        "name": "<string>",
        "schema": {},
        "strict": true
      }
    }
  },
  "tool_choice": "none",
  "tools": [
    {
      "type": "function",
      "name": "<string>",
      "description": "<string>",
      "cache_control": {
        "type": "ephemeral",
        "ttl": "<string>"
      },
      "parameters": {
        "type": "<string>",
        "description": "<string>",
        "required": [
          "<string>"
        ],
        "properties": {},
        "enum": [
          "<string>"
        ],
        "additionalProperties": true
      },
      "strict": true,
      "vector_store_ids": [
        "<string>"
      ],
      "filters": {},
      "max_num_results": 123,
      "ranking_options": {},
      "display_height": 123,
      "display_width": 123,
      "environment": "<string>",
      "enable_zoom": true,
      "search_context_size": "<string>",
      "user_location": {},
      "server_label": "<string>",
      "server_url": "<string>",
      "allowed_tools": {},
      "authorization": "<string>",
      "connector_id": "<string>",
      "headers": {},
      "require_approval": {},
      "server_description": "<string>",
      "container": {},
      "background": "<string>",
      "input_fidelity": "<string>",
      "input_image_mask": {},
      "moderation": "<string>",
      "output_compression": 123,
      "output_format": "<string>",
      "partial_images": 123,
      "quality": "<string>",
      "size": "<string>",
      "format": {}
    }
  ],
  "top_p": 123,
  "truncation": "auto",
  "user": "<string>",
  "fallbacks": [
    "<string>"
  ]
}
'
{
  "object": "<string>",
  "model": "<string>",
  "input_tokens": 123,
  "input_tokens_details": {
    "text_tokens": 123,
    "audio_tokens": 123,
    "image_tokens": 123,
    "cached_tokens": 123
  },
  "tokens": [
    123
  ],
  "token_strings": [
    "<string>"
  ],
  "output_tokens": 123,
  "total_tokens": 123,
  "extra_fields": {
    "request_type": "<string>",
    "provider": "openai",
    "model_requested": "<string>",
    "model_deployment": "<string>",
    "latency": 123,
    "chunk_index": 123,
    "raw_request": {},
    "raw_response": {},
    "cache_debug": {
      "cache_hit": true,
      "cache_id": "<string>",
      "hit_type": "<string>",
      "provider_used": "<string>",
      "model_used": "<string>",
      "input_tokens": 123,
      "threshold": 123,
      "similarity": 123
    }
  }
}

Body

application/json
model
string
required

Model identifier

Example:

"gpt-4"

input
required

Input - can be a string or array of messages

stream
boolean
instructions
string

System instructions for the model

max_output_tokens
integer
metadata
object
parallel_tool_calls
boolean
previous_response_id
string
reasoning
object
store
boolean
temperature
number
Required range: 0 <= x <= 2
text
object
tool_choice
enum<string>
Available options:
none,
auto,
required
tools
object[]
top_p
number
truncation
enum<string>
Available options:
auto,
disabled
user
string
fallbacks
string[]

Response

Successful response

object
string
model
string
input_tokens
integer
input_tokens_details
object
tokens
integer[]
token_strings
string[]
output_tokens
integer
total_tokens
integer
extra_fields
object

Additional fields included in responses