config-sample.md +250 −114
# Sample Configuration

Use this example configuration as a starting point. It includes most keys Codex reads from `config.toml`, along with default behaviors, recommended values where helpful, and short notes.

For explanations and guidance, see:

- [Config basics](https://developers.openai.com/codex/config-basic)
- [Advanced Config](https://developers.openai.com/codex/config-advanced)
- [Config Reference](https://developers.openai.com/codex/config-reference)
- [Sandbox and approvals](https://developers.openai.com/codex/agent-approvals-security#sandbox-and-approvals)
- [Managed configuration](https://developers.openai.com/codex/enterprise/managed-configuration)

Use the snippet below as a reference. Copy only the keys and sections you need into `~/.codex/config.toml` (or into a project-scoped `.codex/config.toml`), then adjust values for your setup.

```toml
# Codex example configuration (config.toml)
#
# This file lists the main keys Codex reads from config.toml, along with default
# behaviors, recommended examples, and concise explanations. Adjust as needed.
#
# Notes
# - Root keys must appear before tables in TOML.
# Core Model Selection
################################################################################

# Primary model used by Codex. Recommended example for most users: "gpt-5.4".
model = "gpt-5.4"

# Communication style for supported models. Allowed values: none | friendly | pragmatic
# personality = "pragmatic"

# Optional model override for /review. Default: unset (uses current session model).
# review_model = "gpt-5.4"

# Provider id selected from [model_providers]. Default: "openai".
model_provider = "openai"

# Default OSS provider for --oss sessions. When unset, Codex prompts. Default: unset.
# oss_provider = "ollama"

# Preferred service tier. `fast` is honored only when enabled in [features].
# service_tier = "flex" # fast | flex

# Optional manual model metadata. When unset, Codex uses model or preset defaults.
# model_context_window = 128000 # tokens; default: auto for model
# model_auto_compact_token_limit = 64000 # tokens; unset uses model defaults
# tool_output_token_limit = 12000 # tokens stored per tool output
# model_catalog_json = "/absolute/path/to/models.json" # optional startup-only model catalog override
# background_terminal_max_timeout = 300000 # ms; max empty write_stdin poll window (default 5m)
# log_dir = "/absolute/path/to/codex-logs" # directory for Codex logs; default: "$CODEX_HOME/log"
# sqlite_home = "/absolute/path/to/codex-state" # optional SQLite-backed runtime state directory

################################################################################
# Reasoning & Verbosity (Responses API capable models)
################################################################################

# Reasoning effort: minimal | low | medium | high | xhigh
# model_reasoning_effort = "medium"

# Optional override used when Codex runs in plan mode: none | minimal | low | medium | high | xhigh
# plan_mode_reasoning_effort = "high"

# Reasoning summary: auto | concise | detailed | none
# model_reasoning_summary = "auto"

# Text verbosity for GPT-5 family (Responses API): low | medium | high
# model_verbosity = "medium"

# Force enable or disable reasoning summaries for current model.
# model_supports_reasoning_summaries = true

################################################################################
# Additional user instructions are injected before AGENTS.md. Default: unset.
# developer_instructions = ""

# Inline override for the history compaction prompt. Default: unset.
# compact_prompt = ""

# Override the default commit co-author trailer. Set to "" to disable it.
# commit_attribution = "Jane Doe <jane@example.com>"

# Override built-in base instructions with a file path. Default: unset.
# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"

# Load the compact prompt override from a file. Default: unset.
# experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"
################################################################################
# Notifications
################################################################################

# External notifier program (argv array). When unset: disabled.
# notify = ["notify-send", "Codex"]

################################################################################
# Approval & Sandbox
# - untrusted: only known-safe read-only commands auto-run; others prompt
# - on-request: model decides when to ask (default)
# - never: never prompt (risky)
# - { granular = { ... } }: allow or auto-reject selected prompt categories
approval_policy = "on-request"

# Who reviews eligible approval prompts: user (default) | guardian_subagent
# approvals_reviewer = "user"

# Example granular policy:
# approval_policy = { granular = {
# sandbox_approval = true,
# rules = true,
# mcp_elicitations = true,
# request_permissions = false,
# skill_approval = false
# } }

# Allow login-shell semantics for shell-based tools when they request `login = true`.
# Default: true. Set false to force non-login shells and reject explicit login-shell requests.
allow_login_shell = true

# Filesystem/network sandbox policy for tool calls:
# - read-only (default)
# - workspace-write
# - danger-full-access (no sandbox; extremely risky)
sandbox_mode = "read-only"
# Named permissions profile to apply by default. Required before using [permissions.<name>].
# default_permissions = "workspace"

# Example filesystem profile. Use `"none"` to deny reads for exact paths or
# glob patterns. On platforms that need pre-expanded glob matches, set
# glob_scan_max_depth when using unbounded patterns such as `**`.
# [permissions.workspace.filesystem]
# glob_scan_max_depth = 3
# ":project_roots" = { "." = "write", "**/*.env" = "none" }
# "/absolute/path/to/secrets" = "none"

################################################################################
# Authentication & Login
# Where to persist CLI login credentials: file (default) | keyring | auto
cli_auth_credentials_store = "file"

# Base URL for ChatGPT auth flow (not OpenAI API).
chatgpt_base_url = "https://chatgpt.com/backend-api/"

# Optional base URL override for the built-in OpenAI provider.
# openai_base_url = "https://us.api.openai.com/v1"

# Restrict ChatGPT login to a specific workspace id. Default: unset.
# forced_chatgpt_workspace_id = "00000000-0000-0000-0000-000000000000"

# Force login mechanism when Codex would normally auto-select. Default: unset.
# Allowed values: chatgpt | api

# Preferred store for MCP OAuth credentials: auto (default) | file | keyring
mcp_oauth_credentials_store = "auto"
# Optional fixed port for MCP OAuth callback: 1-65535. Default: unset.
# mcp_oauth_callback_port = 4321
# Optional redirect URI override for MCP OAuth login (for example, remote devbox ingress).
# Custom callback paths are supported. `mcp_oauth_callback_port` still controls the listener port.
# mcp_oauth_callback_url = "https://devbox.example.internal/callback"

################################################################################
# Project Documentation Controls
# If you use --yolo or another full access sandbox setting, web search defaults to live.
web_search = "cached"

# Active profile name. When unset, no profile is applied.
# profile = "default"

# Suppress the warning shown when under-development feature flags are enabled.
# suppress_unstable_features_warning = true

################################################################################
# Agents (multi-agent roles and limits)
################################################################################

[agents]
# Maximum concurrently open agent threads. Default: 6
# max_threads = 6
# Maximum nested spawn depth. Root session starts at depth 0. Default: 1
# max_depth = 1
# Default timeout per worker for spawn_agents_on_csv jobs. When unset, the tool defaults to 1800 seconds.
# job_max_runtime_seconds = 1800

# [agents.reviewer]
# description = "Find correctness, security, and test risks in code."
# config_file = "./agents/reviewer.toml" # relative to the config.toml that defines it
# nickname_candidates = ["Athena", "Ada"]

################################################################################
# Skills (per-skill overrides)

# Disable or re-enable a specific skill without deleting it.
[[skills.config]]
# path = "/path/to/skill/SKILL.md"
# enabled = false

################################################################################
# Sandbox settings (tables)
################################################################################
[shell_environment_policy]
# inherit: all (default) | core | none
inherit = "all"
# Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: false
ignore_default_excludes = false
# Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: []
exclude = []
# Explicit key/value overrides (always win). Default: {}
# Experimental: run via user shell profile. Default: false
experimental_use_profile = false

################################################################################
# Managed network proxy settings
################################################################################

# Set `default_permissions = "workspace"` before enabling this profile.
# [permissions.workspace.network]
# enabled = true
# proxy_url = "http://127.0.0.1:43128"
# admin_url = "http://127.0.0.1:43129"
# enable_socks5 = false
# socks_url = "http://127.0.0.1:43130"
# enable_socks5_udp = false
# allow_upstream_proxy = false
# dangerously_allow_non_loopback_proxy = false
# dangerously_allow_non_loopback_admin = false
# dangerously_allow_all_unix_sockets = false
# mode = "limited" # limited | full
# allow_local_binding = false
#
# [permissions.workspace.network.domains]
# "api.openai.com" = "allow"
# "example.com" = "deny"
#
# [permissions.workspace.network.unix_sockets]
# "/var/run/docker.sock" = "allow"

################################################################################
# History (table)
################################################################################
# save-all (default) | none
persistence = "save-all"
# Maximum bytes for history file; oldest entries are trimmed when exceeded. Example: 5242880
# max_bytes = 5242880

################################################################################
# UI, Notifications, and Misc (tables)
# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"
# notification_method = "auto"

# When notifications fire: unfocused (default) | always
# notification_condition = "unfocused"

# Enables welcome/status/spinner animations. Default: true
animations = true

# Control alternate screen usage (auto skips it in Zellij to preserve scrollback).
# alternate_screen = "auto"

# Ordered list of footer status-line item IDs. When unset, Codex uses:
# ["model-with-reasoning", "context-remaining", "current-dir"].
# Set to [] to hide the footer.
# status_line = ["model", "context-remaining", "git-branch"]

# Ordered list of terminal window/tab title item IDs. When unset, Codex uses:
# ["spinner", "project"]. Set to [] to clear the title.
# Available IDs include app-name, project, spinner, status, thread, git-branch, model,
# and task-progress.
# terminal_title = ["spinner", "project"]

# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.
# You can also add custom .tmTheme files under $CODEX_HOME/themes.
# theme = "catppuccin-mocha"

# Internal tooltip state keyed by model slug. Usually managed by Codex.
# [tui.model_availability_nux]
# "gpt-5.4" = 1

# Enable or disable analytics for this machine. When unset, Codex uses its default behavior.
[analytics]
enabled = true

# Control whether users can submit feedback from `/feedback`. Default: true
[feedback]
enabled = true
# hide_rate_limit_model_nudge = true
# hide_gpt5_1_migration_prompt = true
# "hide_gpt-5.1-codex-max_migration_prompt" = true
# model_migrations = { "gpt-5.3-codex" = "gpt-5.4" }

################################################################################
# Centralized Feature Flags (preferred)

[features]
# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.
# shell_tool = true
# apps = false
# codex_hooks = false
# unified_exec = true
# shell_snapshot = true
# multi_agent = true
# personality = true
# fast_mode = true
# guardian_approval = false
# enable_request_compression = true
# skill_mcp_dependency_install = true
# prevent_idle_sleep = false

################################################################################
# Memories (table)
################################################################################

# Enable memories with [features].memories, then tune memory behavior here.
# [memories]
# generate_memories = true
# use_memories = true
# disable_on_external_context = false # legacy alias: no_memories_if_mcp_or_web_search

################################################################################
# Define MCP servers under this table. Leave empty to disable.
# command = "docs-server" # required
# args = ["--port", "4000"] # optional
# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is
# env_vars = ["ANOTHER_SECRET"] # optional: forward local parent env vars
# env_vars = ["LOCAL_TOKEN", { name = "REMOTE_TOKEN", source = "remote" }]
# cwd = "/path/to/server" # optional working directory override
# experimental_environment = "remote" # experimental: run stdio via a remote executor
# startup_timeout_sec = 10.0 # optional; default 10.0 seconds
# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)
# tool_timeout_sec = 60.0 # optional; default 60.0 seconds
# enabled_tools = ["search", "summarize"] # optional allow-list
# disabled_tools = ["slow-tool"] # optional deny-list (applied after allow-list)
# scopes = ["read:docs"] # optional OAuth scopes
# oauth_resource = "https://docs.example.com/" # optional OAuth resource

# --- Example: Streamable HTTP transport ---
# [mcp_servers.github]
# startup_timeout_sec = 10.0 # optional
# tool_timeout_sec = 60.0 # optional
# enabled_tools = ["list_issues"] # optional allow-list
# disabled_tools = ["delete_issue"] # optional deny-list
# scopes = ["repo"] # optional OAuth scopes

################################################################################
# Model Providers
################################################################################

# Built-ins include:
# - openai
# - ollama
# - lmstudio
# These IDs are reserved. Use a different ID for custom providers.

[model_providers]

# [model_providers.openaidr]
# name = "OpenAI Data Residency"
# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix
# wire_api = "responses" # only supported value
# # requires_openai_auth = true # use only for providers backed by OpenAI auth
# # request_max_retries = 4 # default 4; max 100
# # stream_max_retries = 5 # default 5; max 100
# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)
# # supports_websockets = true # optional
# # experimental_bearer_token = "sk-example" # optional dev-only direct bearer token
# # http_headers = { "X-Example" = "value" }
# # env_http_headers = { "OpenAI-Organization" = "OPENAI_ORGANIZATION", "OpenAI-Project" = "OPENAI_PROJECT" }

# --- Example: Azure/OpenAI-compatible provider ---
# [model_providers.azure]
# name = "Azure"
# base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
# wire_api = "responses"
# query_params = { api-version = "2025-04-01-preview" }
# env_key = "AZURE_OPENAI_API_KEY"
# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"
# # supports_websockets = false

# --- Example: command-backed bearer token auth ---
# [model_providers.proxy]
# name = "OpenAI using LLM proxy"
# base_url = "https://proxy.example.com/v1"
# wire_api = "responses"
#
# [model_providers.proxy.auth]
# command = "/usr/local/bin/fetch-codex-token"
# args = ["--audience", "codex"]
# timeout_ms = 5000
# refresh_interval_ms = 300000

# --- Example: Local OSS (e.g., Ollama-compatible) ---
# [model_providers.local_ollama]
# name = "Ollama"
# base_url = "http://localhost:11434/v1"
# wire_api = "responses"

################################################################################
# Apps / Connectors
################################################################################

# Optional per-app controls.
[apps]
# [_default] applies to all apps unless overridden per app.
# [apps._default]
# enabled = true
# destructive_enabled = true
# open_world_enabled = true
#
# [apps.google_drive]
# enabled = false
# destructive_enabled = false # block destructive-hint tools for this app
# default_tools_enabled = true
# default_tools_approval_mode = "prompt" # auto | prompt | approve
#
# [apps.google_drive.tools."files/delete"]
# enabled = false
# approval_mode = "approve"

# Optional tool suggestion allowlist for connectors or plugins Codex can offer to install.
# [tool_suggest]
# discoverables = [
# { type = "connector", id = "gmail" },
# { type = "plugin", id = "figma@openai-curated" },
# ]

################################################################################
# Profiles (named presets)
[profiles]

# [profiles.default]
# model = "gpt-5.4"
# model_provider = "openai"
# approval_policy = "on-request"
# sandbox_mode = "read-only"
# service_tier = "flex"
# oss_provider = "ollama"
# model_reasoning_effort = "medium"
# plan_mode_reasoning_effort = "high"
# model_reasoning_summary = "auto"
# model_verbosity = "medium"
# personality = "pragmatic" # or "friendly" or "none"
# chatgpt_base_url = "https://chatgpt.com/backend-api/"
# model_catalog_json = "./models.json"
# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"
# experimental_compact_prompt_file = "./compact_prompt.txt"
# tools_view_image = true
# features = { unified_exec = false }

################################################################################
# Projects (trust levels)
################################################################################

[projects]
# Mark specific worktrees as trusted or untrusted.
# [projects."/absolute/path/to/project"]
# trust_level = "trusted" # or "untrusted"

################################################################################
# Tools
################################################################################

[tools]
# view_image = true

################################################################################
# OpenTelemetry (OTEL) - disabled by default
################################################################################
exporter = "none"
# Trace exporter: none (default) | otlp-http | otlp-grpc
trace_exporter = "none"
# Metrics exporter: none | statsig | otlp-http | otlp-grpc
metrics_exporter = "statsig"

# Example OTLP/HTTP exporter configuration
# [otel.exporter."otlp-http"]
# [otel.exporter."otlp-http".headers]
# "x-otlp-api-key" = "${OTLP_TOKEN}"

# [otel.exporter."otlp-http".tls]
# ca-certificate = "certs/otel-ca.pem"
# client-certificate = "/etc/codex/certs/client.pem"
# client-private-key = "/etc/codex/certs/client-key.pem"

# Example OTLP/gRPC trace exporter configuration
# [otel.trace_exporter."otlp-grpc"]
# endpoint = "https://otel.example.com:4317"
# headers = { "x-otlp-meta" = "abc123" }

################################################################################
# Windows
################################################################################

[windows]
# Native Windows sandbox mode (Windows only): unelevated | elevated
sandbox = "unelevated"
```