Skip to main content
PUT /api/providers/{provider}
Update a provider's configuration
curl --request PUT \
  --url http://localhost:8080/api/providers/{provider} \
  --header 'Content-Type: application/json' \
  --data '{
  "keys": [
    {
      "value": "env.OPENAI_API_KEY",
      "weight": 1,
      "models": [
        "gpt-4o",
        "gpt-4o-mini"
      ],
      "azure_key_config": {
        "endpoint": "https://your-resource.openai.azure.com",
        "deployments": {
          "gpt-4o": "gpt-4o-deployment"
        },
        "api_version": "2024-02-15-preview"
      },
      "vertex_key_config": {
        "project_id": "your-project-id",
        "region": "us-central1",
        "auth_credentials": "env.VERTEX_AUTH_CREDENTIALS"
      },
      "bedrock_key_config": {
        "access_key": "env.AWS_ACCESS_KEY_ID",
        "secret_key": "env.AWS_SECRET_ACCESS_KEY",
        "session_token": "env.AWS_SESSION_TOKEN",
        "region": "us-east-1",
        "arn": "arn:aws:iam::123456789012:role/BedrockRole",
        "deployments": {
          "gpt-4o": "gpt-4o-deployment"
        }
      }
    }
  ],
  "network_config": {
    "timeout": 30,
    "max_retries": 3
  },
  "concurrency_and_buffer_size": {
    "concurrency": 10,
    "buffer_size": 100
  },
  "proxy_config": {
    "url": "http://proxy.example.com:8080",
    "username": "<string>",
    "password": "<string>"
  },
  "send_back_raw_response": true,
  "custom_provider_config": {
    "base_url": "<string>",
    "headers": {},
    "query_params": {}
  }
}'
{
  "name": "openai",
  "keys": [
    {
      "value": "env.OPENAI_API_KEY",
      "weight": 1,
      "models": [
        "gpt-4o",
        "gpt-4o-mini"
      ],
      "azure_key_config": {
        "endpoint": "https://your-resource.openai.azure.com",
        "deployments": {
          "gpt-4o": "gpt-4o-deployment"
        },
        "api_version": "2024-02-15-preview"
      },
      "vertex_key_config": {
        "project_id": "your-project-id",
        "region": "us-central1",
        "auth_credentials": "env.VERTEX_AUTH_CREDENTIALS"
      },
      "bedrock_key_config": {
        "access_key": "env.AWS_ACCESS_KEY_ID",
        "secret_key": "env.AWS_SECRET_ACCESS_KEY",
        "session_token": "env.AWS_SESSION_TOKEN",
        "region": "us-east-1",
        "arn": "arn:aws:iam::123456789012:role/BedrockRole",
        "deployments": {
          "gpt-4o": "gpt-4o-deployment"
        }
      }
    }
  ],
  "network_config": {
    "timeout": 30,
    "max_retries": 3
  },
  "concurrency_and_buffer_size": {
    "concurrency": 10,
    "buffer_size": 100
  },
  "proxy_config": {
    "url": "http://proxy.example.com:8080",
    "username": "<string>",
    "password": "<string>"
  },
  "send_back_raw_response": true,
  "custom_provider_config": {
    "base_url": "<string>",
    "headers": {},
    "query_params": {}
  }
}

Path Parameters

provider
string
required

The name of the provider to update

Body

application/json
keys
object[]

API keys for the provider

network_config
object
concurrency_and_buffer_size
object
proxy_config
object
send_back_raw_response
boolean

Include raw provider response in BifrostResponse

custom_provider_config
object

Response

The updated provider's configuration.

name
enum<string>

AI model provider

Available options:
openai,
anthropic,
azure,
bedrock,
cohere,
vertex,
mistral,
ollama,
gemini,
groq,
openrouter,
sgl,
parasail,
cerebras
Example:

"openai"

keys
object[]

API keys for the provider

network_config
object
concurrency_and_buffer_size
object
proxy_config
object
send_back_raw_response
boolean

Include raw provider response in BifrostResponse

custom_provider_config
object