config-sample.md +123 −30
27# Core Model Selection27# Core Model Selection
28################################################################################28################################################################################
29 29
# Primary model used by Codex. Recommended example for most users: "gpt-5.5".
model = "gpt-5.5"
32 32
33# Communication style for supported models. Allowed values: none | friendly | pragmatic33# Communication style for supported models. Allowed values: none | friendly | pragmatic
34# personality = "pragmatic"34# personality = "pragmatic"
35 35
36# Optional model override for /review. Default: unset (uses current session model).36# Optional model override for /review. Default: unset (uses current session model).
# review_model = "gpt-5.5"
38 38
39# Provider id selected from [model_providers]. Default: "openai".39# Provider id selected from [model_providers]. Default: "openai".
40model_provider = "openai"40model_provider = "openai"
107# - untrusted: only known-safe read-only commands auto-run; others prompt107# - untrusted: only known-safe read-only commands auto-run; others prompt
108# - on-request: model decides when to ask (default)108# - on-request: model decides when to ask (default)
109# - never: never prompt (risky)109# - never: never prompt (risky)
# - { granular = { ... } }: allow or auto-reject selected prompt categories
approval_policy = "on-request"
# Who reviews eligible approval prompts: user (default) | auto_review
# approvals_reviewer = "user"

# Example granular policy:
# approval_policy = { granular = {
#   sandbox_approval = true,
#   rules = true,
#   mcp_elicitations = true,
#   request_permissions = false,
#   skill_approval = false
# } }
114 123
115# Allow login-shell semantics for shell-based tools when they request `login = true`.124# Allow login-shell semantics for shell-based tools when they request `login = true`.
116# Default: true. Set false to force non-login shells and reject explicit login-shell requests.125# Default: true. Set false to force non-login shells and reject explicit login-shell requests.
121# - workspace-write130# - workspace-write
122# - danger-full-access (no sandbox; extremely risky)131# - danger-full-access (no sandbox; extremely risky)
123sandbox_mode = "read-only"132sandbox_mode = "read-only"
133# Named permissions profile to apply by default. Built-ins:
134# :read-only | :workspace | :danger-no-sandbox
135# Use a custom name such as "workspace" only when you also define [permissions.workspace].
136# default_permissions = ":workspace"
137
138# Example filesystem profile. Use `"none"` to deny reads for exact paths or
139# glob patterns. On platforms that need pre-expanded glob matches, set
140# glob_scan_max_depth when using unbounded patterns such as `**`.
141# [permissions.workspace.filesystem]
142# glob_scan_max_depth = 3
143# ":project_roots" = { "." = "write", "**/*.env" = "none" }
144# "/absolute/path/to/secrets" = "none"
124 145
125################################################################################146################################################################################
126# Authentication & Login147# Authentication & Login
132# Base URL for ChatGPT auth flow (not OpenAI API).153# Base URL for ChatGPT auth flow (not OpenAI API).
133chatgpt_base_url = "https://chatgpt.com/backend-api/"154chatgpt_base_url = "https://chatgpt.com/backend-api/"
134 155
156# Optional base URL override for the built-in OpenAI provider.
157# openai_base_url = "https://us.api.openai.com/v1"
158
135# Restrict ChatGPT login to a specific workspace id. Default: unset.159# Restrict ChatGPT login to a specific workspace id. Default: unset.
136# forced_chatgpt_workspace_id = "00000000-0000-0000-0000-000000000000"160# forced_chatgpt_workspace_id = "00000000-0000-0000-0000-000000000000"
137 161
265# Managed network proxy settings289# Managed network proxy settings
266################################################################################290################################################################################
267 291
# Set `default_permissions = "workspace"` before enabling this profile.
# [permissions.workspace.network]
269# enabled = true294# enabled = true
270# proxy_url = "http://127.0.0.1:43128"295# proxy_url = "http://127.0.0.1:43128"
271# admin_url = "http://127.0.0.1:43129"296# admin_url = "http://127.0.0.1:43129"
277# dangerously_allow_non_loopback_admin = false302# dangerously_allow_non_loopback_admin = false
278# dangerously_allow_all_unix_sockets = false303# dangerously_allow_all_unix_sockets = false
279# mode = "limited" # limited | full304# mode = "limited" # limited | full
280# allowed_domains = ["api.openai.com"]
281# denied_domains = ["example.com"]
282# allow_unix_sockets = ["/var/run/docker.sock"]
283# allow_local_binding = false305# allow_local_binding = false
306#
307# [permissions.workspace.network.domains]
308# "api.openai.com" = "allow"
309# "example.com" = "deny"
310#
311# [permissions.workspace.network.unix_sockets]
312# "/var/run/docker.sock" = "allow"
284 313
285################################################################################314################################################################################
286# History (table)315# History (table)
304# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"333# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"
305# notification_method = "auto"334# notification_method = "auto"
306 335
336# When notifications fire: unfocused (default) | always
337# notification_condition = "unfocused"
338
307# Enables welcome/status/spinner animations. Default: true339# Enables welcome/status/spinner animations. Default: true
308animations = true340animations = true
309 341
318# Set to [] to hide the footer.350# Set to [] to hide the footer.
319# status_line = ["model", "context-remaining", "git-branch"]351# status_line = ["model", "context-remaining", "git-branch"]
320 352
353# Ordered list of terminal window/tab title item IDs. When unset, Codex uses:
354# ["spinner", "project"]. Set to [] to clear the title.
355# Available IDs include app-name, project, spinner, status, thread, git-branch, model,
356# and task-progress.
357# terminal_title = ["spinner", "project"]
358
321# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.359# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.
322# You can also add custom .tmTheme files under $CODEX_HOME/themes.360# You can also add custom .tmTheme files under $CODEX_HOME/themes.
323# theme = "catppuccin-mocha"361# theme = "catppuccin-mocha"
324 362
363# Custom key bindings. Context-specific bindings override [tui.keymap.global].
364# Use [] to unbind an action.
365# [tui.keymap.global]
366# open_transcript = "ctrl-t"
367# open_external_editor = []
368#
369# [tui.keymap.composer]
370# submit = ["enter", "ctrl-m"]
371
325# Internal tooltip state keyed by model slug. Usually managed by Codex.372# Internal tooltip state keyed by model slug. Usually managed by Codex.
326# [tui.model_availability_nux]373# [tui.model_availability_nux]
327# "gpt-5.4" = 1374# "gpt-5.4" = 1
341# hide_rate_limit_model_nudge = true388# hide_rate_limit_model_nudge = true
342# hide_gpt5_1_migration_prompt = true389# hide_gpt5_1_migration_prompt = true
343# "hide_gpt-5.1-codex-max_migration_prompt" = true390# "hide_gpt-5.1-codex-max_migration_prompt" = true
# model_migrations = { "gpt-5.3-codex" = "gpt-5.4" }
345 392
346################################################################################393################################################################################
347# Centralized Feature Flags (preferred)394# Centralized Feature Flags (preferred)
351# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.398# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.
352# shell_tool = true399# shell_tool = true
353# apps = false400# apps = false
# codex_hooks = false
# unified_exec = true
# shell_snapshot = true
# multi_agent = true
358# personality = true405# personality = true
359# use_linux_sandbox_bwrap = false
360# runtime_metrics = true
361# powershell_utf8 = true
362# child_agents_md = false
363# sqlite = true
364# fast_mode = true406# fast_mode = true
365# enable_request_compression = true407# enable_request_compression = true
366# image_generation = false
367# skill_mcp_dependency_install = true408# skill_mcp_dependency_install = true
368# skill_env_var_dependency_prompt = false
369# default_mode_request_user_input = false
370# artifact = false
371# prevent_idle_sleep = false409# prevent_idle_sleep = false
# responses_websockets = false
################################################################################
# Memories (table)
413################################################################################
414
415# Enable memories with [features].memories, then tune memory behavior here.
416# [memories]
417# generate_memories = true
418# use_memories = true
419# disable_on_external_context = false # legacy alias: no_memories_if_mcp_or_web_search
420
421################################################################################
422# Lifecycle hooks can be configured here inline or in a sibling hooks.json.
423################################################################################
424
425# [hooks]
426# [[hooks.PreToolUse]]
427# matcher = "^Bash$"
428#
429# [[hooks.PreToolUse.hooks]]
430# type = "command"
431# command = 'python3 "/absolute/path/to/pre_tool_use_policy.py"'
432# timeout = 30
433# statusMessage = "Checking Bash command"
375 434
376################################################################################435################################################################################
377# Define MCP servers under this table. Leave empty to disable.436# Define MCP servers under this table. Leave empty to disable.
386# command = "docs-server" # required445# command = "docs-server" # required
387# args = ["--port", "4000"] # optional446# args = ["--port", "4000"] # optional
388# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is447# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is
# env_vars = ["ANOTHER_SECRET"] # optional: forward local parent env vars
449# env_vars = ["LOCAL_TOKEN", { name = "REMOTE_TOKEN", source = "remote" }]
390# cwd = "/path/to/server" # optional working directory override450# cwd = "/path/to/server" # optional working directory override
451# experimental_environment = "remote" # experimental: run stdio via a remote executor
391# startup_timeout_sec = 10.0 # optional; default 10.0 seconds452# startup_timeout_sec = 10.0 # optional; default 10.0 seconds
392# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)453# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)
393# tool_timeout_sec = 60.0 # optional; default 60.0 seconds454# tool_timeout_sec = 60.0 # optional; default 60.0 seconds
418# - openai479# - openai
419# - ollama480# - ollama
420# - lmstudio481# - lmstudio
482# - amazon-bedrock
483# These IDs are reserved. Use a different ID for custom providers.
421 484
422[model_providers]485[model_providers]
423 486
487# --- Example: built-in Amazon Bedrock provider options ---
488# model_provider = "amazon-bedrock"
489# model = "<bedrock-model-id>"
490# [model_providers.amazon-bedrock.aws]
491# profile = "default"
492# region = "eu-central-1"
493
424# --- Example: OpenAI data residency with explicit base URL or headers ---494# --- Example: OpenAI data residency with explicit base URL or headers ---
425# [model_providers.openaidr]495# [model_providers.openaidr]
426# name = "OpenAI Data Residency"496# name = "OpenAI Data Residency"
427# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix497# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix
428# wire_api = "responses" # only supported value498# wire_api = "responses" # only supported value
# # requires_openai_auth = true # use only for providers backed by OpenAI auth
430# # request_max_retries = 4 # default 4; max 100500# # request_max_retries = 4 # default 4; max 100
431# # stream_max_retries = 5 # default 5; max 100501# # stream_max_retries = 5 # default 5; max 100
432# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)502# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)
445# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"515# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"
446# # supports_websockets = false516# # supports_websockets = false
447 517
518# --- Example: command-backed bearer token auth ---
519# [model_providers.proxy]
520# name = "OpenAI using LLM proxy"
521# base_url = "https://proxy.example.com/v1"
522# wire_api = "responses"
523#
524# [model_providers.proxy.auth]
525# command = "/usr/local/bin/fetch-codex-token"
526# args = ["--audience", "codex"]
527# timeout_ms = 5000
528# refresh_interval_ms = 300000
529
448# --- Example: Local OSS (e.g., Ollama-compatible) ---530# --- Example: Local OSS (e.g., Ollama-compatible) ---
# [model_providers.local_ollama]
450# name = "Ollama"532# name = "Ollama"
451# base_url = "http://localhost:11434/v1"533# base_url = "http://localhost:11434/v1"
452# wire_api = "responses"534# wire_api = "responses"
473# enabled = false555# enabled = false
474# approval_mode = "approve"556# approval_mode = "approve"
475 557
558# Optional tool suggestion allowlist for connectors or plugins Codex can offer to install.
559# [tool_suggest]
560# discoverables = [
561# { type = "connector", id = "gmail" },
562# { type = "plugin", id = "figma@openai-curated" },
563# ]
564# disabled_tools = [
565# { type = "plugin", id = "slack@openai-curated" },
566# { type = "connector", id = "connector_googlecalendar" },
567# ]
568
476################################################################################569################################################################################
477# Profiles (named presets)570# Profiles (named presets)
478################################################################################571################################################################################