Try Bifrost Enterprise free for 14 days. Explore now
curl --request GET \
--url http://localhost:8080/openai/v1/batches
{
"object": "<string>",
"data": [
{
"id": "<string>",
"object": "<string>",
"endpoint": "<string>",
"input_file_id": "<string>",
"completion_window": "<string>",
"status": "validating",
"request_counts": {
"total": 123,
"completed": 123,
"failed": 123,
"succeeded": 123,
"expired": 123,
"canceled": 123,
"pending": 123
},
"metadata": {},
"created_at": 123,
"expires_at": 123,
"in_progress_at": 123,
"finalizing_at": 123,
"completed_at": 123,
"failed_at": 123,
"expired_at": 123,
"cancelling_at": 123,
"cancelled_at": 123,
"output_file_id": "<string>",
"error_file_id": "<string>",
"errors": {
"object": "<string>",
"data": [
{
"code": "<string>",
"message": "<string>",
"param": "<string>",
"line": 123
}
]
},
"processing_status": "<string>",
"results_url": "<string>",
"archived_at": 123,
"operation_name": "<string>",
"done": true,
"progress": 123,
"extra_fields": {
"request_type": "<string>",
"provider": "openai",
"model_requested": "<string>",
"model_deployment": "<string>",
"latency": 123,
"chunk_index": 123,
"raw_request": {},
"raw_response": {},
"cache_debug": {
"cache_hit": true,
"cache_id": "<string>",
"hit_type": "<string>",
"provider_used": "<string>",
"model_used": "<string>",
"input_tokens": 123,
"threshold": 123,
"similarity": 123
}
}
}
],
"first_id": "<string>",
"last_id": "<string>",
"has_more": true,
"next_cursor": "<string>",
"extra_fields": {
"request_type": "<string>",
"provider": "openai",
"model_requested": "<string>",
"model_deployment": "<string>",
"latency": 123,
"chunk_index": 123,
"raw_request": {},
"raw_response": {},
"cache_debug": {
"cache_hit": true,
"cache_id": "<string>",
"hit_type": "<string>",
"provider_used": "<string>",
"model_used": "<string>",
"input_tokens": 123,
"threshold": 123,
"similarity": 123
}
}
}
Lists batch processing jobs.
Note: This endpoint also works without the /v1 prefix (e.g., /openai/batches).
curl --request GET \
--url http://localhost:8080/openai/v1/batches
{
"object": "<string>",
"data": [
{
"id": "<string>",
"object": "<string>",
"endpoint": "<string>",
"input_file_id": "<string>",
"completion_window": "<string>",
"status": "validating",
"request_counts": {
"total": 123,
"completed": 123,
"failed": 123,
"succeeded": 123,
"expired": 123,
"canceled": 123,
"pending": 123
},
"metadata": {},
"created_at": 123,
"expires_at": 123,
"in_progress_at": 123,
"finalizing_at": 123,
"completed_at": 123,
"failed_at": 123,
"expired_at": 123,
"cancelling_at": 123,
"cancelled_at": 123,
"output_file_id": "<string>",
"error_file_id": "<string>",
"errors": {
"object": "<string>",
"data": [
{
"code": "<string>",
"message": "<string>",
"param": "<string>",
"line": 123
}
]
},
"processing_status": "<string>",
"results_url": "<string>",
"archived_at": 123,
"operation_name": "<string>",
"done": true,
"progress": 123,
"extra_fields": {
"request_type": "<string>",
"provider": "openai",
"model_requested": "<string>",
"model_deployment": "<string>",
"latency": 123,
"chunk_index": 123,
"raw_request": {},
"raw_response": {},
"cache_debug": {
"cache_hit": true,
"cache_id": "<string>",
"hit_type": "<string>",
"provider_used": "<string>",
"model_used": "<string>",
"input_tokens": 123,
"threshold": 123,
"similarity": 123
}
}
}
],
"first_id": "<string>",
"last_id": "<string>",
"has_more": true,
"next_cursor": "<string>",
"extra_fields": {
"request_type": "<string>",
"provider": "openai",
"model_requested": "<string>",
"model_deployment": "<string>",
"latency": 123,
"chunk_index": 123,
"raw_request": {},
"raw_response": {},
"cache_debug": {
"cache_hit": true,
"cache_id": "<string>",
"hit_type": "<string>",
"provider_used": "<string>",
"model_used": "<string>",
"input_tokens": 123,
"threshold": 123,
"similarity": 123
}
}
}
Maximum number of batches to return
Cursor for pagination
Filter by provider
Was this page helpful?