Try Bifrost Enterprise free for 14 days. Explore now
curl --request POST \
--url http://localhost:8080/v1/count_tokens \
--header 'Content-Type: application/json' \
--data '
{
"model": "<string>",
"messages": [
{
"id": "<string>",
"type": "message",
"status": "in_progress",
"role": "assistant",
"content": "<string>",
"call_id": "<string>",
"name": "<string>",
"arguments": "<string>",
"output": {},
"action": {},
"error": "<string>",
"queries": [
"<string>"
],
"results": [
{}
],
"summary": [
{
"type": "summary_text",
"text": "<string>"
}
],
"encrypted_content": "<string>"
}
],
"fallbacks": [
"<string>"
],
"tools": [
{
"type": "function",
"name": "<string>",
"description": "<string>",
"cache_control": {
"type": "ephemeral",
"ttl": "<string>"
},
"parameters": {
"type": "<string>",
"description": "<string>",
"required": [
"<string>"
],
"properties": {},
"enum": [
"<string>"
],
"additionalProperties": true
},
"strict": true,
"vector_store_ids": [
"<string>"
],
"filters": {},
"max_num_results": 123,
"ranking_options": {},
"display_height": 123,
"display_width": 123,
"environment": "<string>",
"enable_zoom": true,
"search_context_size": "<string>",
"user_location": {},
"server_label": "<string>",
"server_url": "<string>",
"allowed_tools": {},
"authorization": "<string>",
"connector_id": "<string>",
"headers": {},
"require_approval": {},
"server_description": "<string>",
"container": {},
"background": "<string>",
"input_fidelity": "<string>",
"input_image_mask": {},
"moderation": "<string>",
"output_compression": 123,
"output_format": "<string>",
"partial_images": 123,
"quality": "<string>",
"size": "<string>",
"format": {}
}
],
"instructions": "<string>",
"text": "<string>"
}
'

Response:

{
"object": "<string>",
"model": "<string>",
"input_tokens": 123,
"input_tokens_details": {
"text_tokens": 123,
"audio_tokens": 123,
"image_tokens": 123,
"cached_tokens": 123
},
"tokens": [
123
],
"token_strings": [
"<string>"
],
"output_tokens": 123,
"total_tokens": 123,
"extra_fields": {
"request_type": "<string>",
"provider": "openai",
"model_requested": "<string>",
"model_deployment": "<string>",
"latency": 123,
"chunk_index": 123,
"raw_request": {},
"raw_response": {},
"cache_debug": {
"cache_hit": true,
"cache_id": "<string>",
"hit_type": "<string>",
"provider_used": "<string>",
"model_used": "<string>",
"input_tokens": 123,
"threshold": 123,
"similarity": 123
}
}
}

Counts the number of tokens in the provided messages.
curl --request POST \
--url http://localhost:8080/v1/count_tokens \
--header 'Content-Type: application/json' \
--data '
{
"model": "<string>",
"messages": [
{
"id": "<string>",
"type": "message",
"status": "in_progress",
"role": "assistant",
"content": "<string>",
"call_id": "<string>",
"name": "<string>",
"arguments": "<string>",
"output": {},
"action": {},
"error": "<string>",
"queries": [
"<string>"
],
"results": [
{}
],
"summary": [
{
"type": "summary_text",
"text": "<string>"
}
],
"encrypted_content": "<string>"
}
],
"fallbacks": [
"<string>"
],
"tools": [
{
"type": "function",
"name": "<string>",
"description": "<string>",
"cache_control": {
"type": "ephemeral",
"ttl": "<string>"
},
"parameters": {
"type": "<string>",
"description": "<string>",
"required": [
"<string>"
],
"properties": {},
"enum": [
"<string>"
],
"additionalProperties": true
},
"strict": true,
"vector_store_ids": [
"<string>"
],
"filters": {},
"max_num_results": 123,
"ranking_options": {},
"display_height": 123,
"display_width": 123,
"environment": "<string>",
"enable_zoom": true,
"search_context_size": "<string>",
"user_location": {},
"server_label": "<string>",
"server_url": "<string>",
"allowed_tools": {},
"authorization": "<string>",
"connector_id": "<string>",
"headers": {},
"require_approval": {},
"server_description": "<string>",
"container": {},
"background": "<string>",
"input_fidelity": "<string>",
"input_image_mask": {},
"moderation": "<string>",
"output_compression": 123,
"output_format": "<string>",
"partial_images": 123,
"quality": "<string>",
"size": "<string>",
"format": {}
}
],
"instructions": "<string>",
"text": "<string>"
}
'

Response:

{
"object": "<string>",
"model": "<string>",
"input_tokens": 123,
"input_tokens_details": {
"text_tokens": 123,
"audio_tokens": 123,
"image_tokens": 123,
"cached_tokens": 123
},
"tokens": [
123
],
"token_strings": [
"<string>"
],
"output_tokens": 123,
"total_tokens": 123,
"extra_fields": {
"request_type": "<string>",
"provider": "openai",
"model_requested": "<string>",
"model_deployment": "<string>",
"latency": 123,
"chunk_index": 123,
"raw_request": {},
"raw_response": {},
"cache_debug": {
"cache_hit": true,
"cache_id": "<string>",
"hit_type": "<string>",
"provider_used": "<string>",
"model_used": "<string>",
"input_tokens": 123,
"threshold": 123,
"similarity": 123
}
}
}

Was this page helpful?