# Sample Configuration

Use this example configuration as a starting point. It includes most keys Codex reads from `config.toml`, along with default behaviors, recommended values where helpful, and short notes.

For explanations and guidance, see:

- [Config basics](https://developers.openai.com/codex/config-basic)
- [Advanced Config](https://developers.openai.com/codex/config-advanced)
- [Config Reference](https://developers.openai.com/codex/config-reference)
- [Sandbox and approvals](https://developers.openai.com/codex/agent-approvals-security#sandbox-and-approvals)
- [Managed configuration](https://developers.openai.com/codex/enterprise/managed-configuration)

Use the snippet below as a reference. Copy only the keys and sections you need into `~/.codex/config.toml` (or into a project-scoped `.codex/config.toml`), then adjust values for your setup.

```toml
# Codex example configuration (config.toml)
#
# This file lists the main keys Codex reads from config.toml, along with default
# behaviors, recommended examples, and concise explanations. Adjust as needed.
#
# Notes
# - Root keys must appear before tables in TOML.
# Core Model Selection
################################################################################

# Primary model used by Codex. Recommended example for most users: "gpt-5.4".
model = "gpt-5.4"

# Communication style for supported models. Allowed values: none | friendly | pragmatic
# personality = "pragmatic"

# Optional model override for /review. Default: unset (uses current session model).
# review_model = "gpt-5.4"

# Provider id selected from [model_providers]. Default: "openai".
model_provider = "openai"

# Default OSS provider for --oss sessions. When unset, Codex prompts. Default: unset.
# oss_provider = "ollama"

# Preferred service tier. `fast` is honored only when enabled in [features].
# service_tier = "flex" # fast | flex

# Optional manual model metadata. When unset, Codex uses model or preset defaults.
# model_context_window = 128000 # tokens; default: auto for model
# model_auto_compact_token_limit = 64000 # tokens; unset uses model defaults
# tool_output_token_limit = 12000 # tokens stored per tool output
# model_catalog_json = "/absolute/path/to/models.json" # optional startup-only model catalog override
# background_terminal_max_timeout = 300000 # ms; max empty write_stdin poll window (default 5m)
# log_dir = "/absolute/path/to/codex-logs" # directory for Codex logs; default: "$CODEX_HOME/log"
# Reasoning & Verbosity (Responses API capable models)
################################################################################

# Reasoning effort: minimal | low | medium | high | xhigh
# model_reasoning_effort = "medium"

# Optional override used when Codex runs in plan mode: none | minimal | low | medium | high | xhigh
# plan_mode_reasoning_effort = "high"

# Reasoning summary: auto | concise | detailed | none
# model_reasoning_summary = "auto"

# Text verbosity for GPT-5 family (Responses API): low | medium | high
# model_verbosity = "medium"

# Force enable or disable reasoning summaries for current model.
# model_supports_reasoning_summaries = true

################################################################################
# Additional user instructions are injected before AGENTS.md. Default: unset.
# developer_instructions = ""

# Inline override for the history compaction prompt. Default: unset.
# compact_prompt = ""

# Override the default commit co-author trailer. Set to "" to disable it.
# commit_attribution = "Jane Doe <jane@example.com>"

# Override built-in base instructions with a file path. Default: unset.
# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"

# Load the compact prompt override from a file. Default: unset.
# experimental_compact_prompt_file = "/absolute/or/relative/path/to/compact_prompt.txt"

################################################################################
# Notifications
################################################################################

# External notifier program (argv array). When unset: disabled.
# notify = ["notify-send", "Codex"]

################################################################################
# Approval & Sandbox
# - untrusted: only known-safe read-only commands auto-run; others prompt
# - on-request: model decides when to ask (default)
# - never: never prompt (risky)
# - { granular = { ... } }: allow or auto-reject selected prompt categories
approval_policy = "on-request"
# Who reviews eligible approval prompts: user (default) | guardian_subagent
# approvals_reviewer = "user"

# Example granular policy:
# approval_policy = { granular = {
# sandbox_approval = true,
# rules = true,
# mcp_elicitations = true,
# request_permissions = false,
# skill_approval = false
# } }

# Allow login-shell semantics for shell-based tools when they request `login = true`.
# Default: true. Set false to force non-login shells and reject explicit login-shell requests.
# - workspace-write
# - danger-full-access (no sandbox; extremely risky)
sandbox_mode = "read-only"
# Named permissions profile to apply by default. Required before using [permissions.<name>].
# default_permissions = "workspace"
################################################################################
# Authentication & Login
################################################################################

# Where to persist CLI login credentials: file (default) | keyring | auto
cli_auth_credentials_store = "file"

# Base URL for ChatGPT auth flow (not OpenAI API).
chatgpt_base_url = "https://chatgpt.com/backend-api/"

# Optional base URL override for the built-in OpenAI provider.
# openai_base_url = "https://us.api.openai.com/v1"

# Restrict ChatGPT login to a specific workspace id. Default: unset.
# forced_chatgpt_workspace_id = "00000000-0000-0000-0000-000000000000"

# Force login mechanism when Codex would normally auto-select. Default: unset.
# Allowed values: chatgpt | api
# If you use --yolo or another full access sandbox setting, web search defaults to live.
web_search = "cached"

# Active profile name. When unset, no profile is applied.
# profile = "default"

# Suppress the warning shown when under-development feature flags are enabled.
# suppress_unstable_features_warning = true

################################################################################
# Agents (multi-agent roles and limits)
################################################################################

[agents]
# Maximum concurrently open agent threads. Default: 6
# max_threads = 6
# Maximum nested spawn depth. Root session starts at depth 0. Default: 1
# job_max_runtime_seconds = 1800

# [agents.reviewer]
# description = "Find correctness, security, and test risks in code."
# config_file = "./agents/reviewer.toml" # relative to the config.toml that defines it
# nickname_candidates = ["Athena", "Ada"]

################################################################################
# Skills (per-skill overrides)
# path = "/path/to/skill/SKILL.md"
# enabled = false

################################################################################
# Sandbox settings (tables)
################################################################################
[shell_environment_policy]
# inherit: all (default) | core | none
inherit = "all"
# Skip default excludes for names containing KEY/SECRET/TOKEN (case-insensitive). Default: false
ignore_default_excludes = false
# Case-insensitive glob patterns to remove (e.g., "AWS_*", "AZURE_*"). Default: []
exclude = []
# Explicit key/value overrides (always win). Default: {}
# Experimental: run via user shell profile. Default: false
experimental_use_profile = false

################################################################################
# Managed network proxy settings
################################################################################

# Set `default_permissions = "workspace"` before enabling this profile.
# [permissions.workspace.network]
# enabled = true
# proxy_url = "http://127.0.0.1:43128"
# admin_url = "http://127.0.0.1:43129"
# enable_socks5 = false
# socks_url = "http://127.0.0.1:43130"
# enable_socks5_udp = false
# allow_upstream_proxy = false
# dangerously_allow_non_loopback_proxy = false
# dangerously_allow_non_loopback_admin = false
# dangerously_allow_all_unix_sockets = false
# mode = "limited" # limited | full
# allow_local_binding = false
#
# [permissions.workspace.network.domains]
# "api.openai.com" = "allow"
# "example.com" = "deny"
#
# [permissions.workspace.network.unix_sockets]
# "/var/run/docker.sock" = "allow"
################################################################################
# History (table)
################################################################################
# save-all (default) | none
persistence = "save-all"
# Maximum bytes for history file; oldest entries are trimmed when exceeded. Example: 5242880
# max_bytes = 5242880

################################################################################
# UI, Notifications, and Misc (tables)
# Set to [] to hide the footer.
# status_line = ["model", "context-remaining", "git-branch"]

# Ordered list of terminal window/tab title item IDs. When unset, Codex uses:
# ["spinner", "project"]. Set to [] to clear the title.
# Available IDs include app-name, project, spinner, status, thread, git-branch, model,
# and task-progress.
# terminal_title = ["spinner", "project"]

# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.
# You can also add custom .tmTheme files under $CODEX_HOME/themes.
# theme = "catppuccin-mocha"

# Internal tooltip state keyed by model slug. Usually managed by Codex.
# [tui.model_availability_nux]
# "gpt-5.4" = 1

# Enable or disable analytics for this machine. When unset, Codex uses its default behavior.
[analytics]
enabled = true

# Control whether users can submit feedback from `/feedback`. Default: true
[feedback]
enabled = true
# hide_rate_limit_model_nudge = true
# hide_gpt5_1_migration_prompt = true
# "hide_gpt-5.1-codex-max_migration_prompt" = true
# model_migrations = { "gpt-5.3-codex" = "gpt-5.4" }

################################################################################
# Centralized Feature Flags (preferred)
# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.
# shell_tool = true
# apps = false
# codex_hooks = false
# unified_exec = true
# shell_snapshot = true
# multi_agent = true
# personality = true
# fast_mode = true
# smart_approvals = false
# enable_request_compression = true
# skill_mcp_dependency_install = true
# prevent_idle_sleep = false

################################################################################
# Define MCP servers under this table. Leave empty to disable.
# tool_timeout_sec = 60.0 # optional; default 60.0 seconds
# enabled_tools = ["search", "summarize"] # optional allow-list
# disabled_tools = ["slow-tool"] # optional deny-list (applied after allow-list)
# scopes = ["read:docs"] # optional OAuth scopes
# oauth_resource = "https://docs.example.com/" # optional OAuth resource

# --- Example: Streamable HTTP transport ---
# [mcp_servers.github]
# startup_timeout_sec = 10.0 # optional
# tool_timeout_sec = 60.0 # optional
# enabled_tools = ["list_issues"] # optional allow-list
# disabled_tools = ["delete_issue"] # optional deny-list
# scopes = ["repo"] # optional OAuth scopes

################################################################################
# Model Providers
################################################################################

# Built-ins include:
# - openai
# - ollama
# - lmstudio
# These IDs are reserved. Use a different ID for custom providers.

[model_providers]

# [model_providers.openaidr]
# name = "OpenAI Data Residency"
# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix
# wire_api = "responses" # only supported value
# # requires_openai_auth = true # use only for providers backed by OpenAI auth
# # request_max_retries = 4 # default 4; max 100
# # stream_max_retries = 5 # default 5; max 100
# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)
# # supports_websockets = true # optional
# # experimental_bearer_token = "sk-example" # optional dev-only direct bearer token
# # http_headers = { "X-Example" = "value" }
# # env_http_headers = { "OpenAI-Organization" = "OPENAI_ORGANIZATION", "OpenAI-Project" = "OPENAI_PROJECT" }

# --- Example: Azure/OpenAI-compatible provider ---
# [model_providers.azure]
# name = "Azure"
# base_url = "https://YOUR_PROJECT_NAME.openai.azure.com/openai"
# wire_api = "responses"
# query_params = { api-version = "2025-04-01-preview" }
# env_key = "AZURE_OPENAI_API_KEY"
# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"
# # supports_websockets = false

# --- Example: command-backed bearer token auth ---
# [model_providers.proxy]
# name = "OpenAI using LLM proxy"
# base_url = "https://proxy.example.com/v1"
# wire_api = "responses"
#
# [model_providers.proxy.auth]
# command = "/usr/local/bin/fetch-codex-token"
# args = ["--audience", "codex"]
# timeout_ms = 5000
# refresh_interval_ms = 300000

# --- Example: Local OSS (e.g., Ollama-compatible) ---
# [model_providers.local_ollama]
# name = "Ollama"
# base_url = "http://localhost:11434/v1"
# wire_api = "responses"

################################################################################
# Apps / Connectors
# enabled = false
# approval_mode = "approve"

# Optional tool suggestion allowlist for connectors or plugins Codex can offer to install.
# [tool_suggest]
# discoverables = [
# { type = "connector", id = "gmail" },
# { type = "plugin", id = "figma@openai-curated" },
# ]

################################################################################
# Profiles (named presets)
################################################################################

[profiles]

# [profiles.default]
# model = "gpt-5.4"
# model_provider = "openai"
# approval_policy = "on-request"
# sandbox_mode = "read-only"
# service_tier = "flex"
# oss_provider = "ollama"
# model_reasoning_effort = "medium"
# plan_mode_reasoning_effort = "high"
# model_reasoning_summary = "auto"
# model_verbosity = "medium"
# personality = "pragmatic" # or "friendly" or "none"
# chatgpt_base_url = "https://chatgpt.com/backend-api/"
# model_catalog_json = "./models.json"
# model_instructions_file = "/absolute/or/relative/path/to/instructions.txt"
# experimental_compact_prompt_file = "./compact_prompt.txt"
# tools_view_image = true
# features = { unified_exec = false }

################################################################################
# Projects (trust levels)
################################################################################

[projects]
# Mark specific worktrees as trusted or untrusted.
# [projects."/absolute/path/to/project"]
# trust_level = "trusted" # or "untrusted"

################################################################################
# Tools
################################################################################

[tools]
# view_image = true

################################################################################
# OpenTelemetry (OTEL) - disabled by default
################################################################################
exporter = "none"
# Trace exporter: none (default) | otlp-http | otlp-grpc
trace_exporter = "none"
# Metrics exporter: none | statsig | otlp-http | otlp-grpc
metrics_exporter = "statsig"

# Example OTLP/HTTP exporter configuration
# [otel.exporter."otlp-http"]
# [otel.exporter."otlp-http".headers]
# "x-otlp-api-key" = "${OTLP_TOKEN}"

# [otel.exporter."otlp-http".tls]
# ca-certificate = "certs/otel-ca.pem"
# client-certificate = "/etc/codex/certs/client.pem"
# client-private-key = "/etc/codex/certs/client-key.pem"

# Example OTLP/gRPC trace exporter configuration
# [otel.trace_exporter."otlp-grpc"]
# endpoint = "https://otel.example.com:4317"
# headers = { "x-otlp-meta" = "abc123" }

################################################################################
# Windows
################################################################################

[windows]
# Native Windows sandbox mode (Windows only): unelevated | elevated
sandbox = "unelevated"
```