config-sample.md +100 −30
27# Core Model Selection27# Core Model Selection
28################################################################################28################################################################################
29 29
# Primary model used by Codex. Recommended example for most users: "gpt-5.5".
model = "gpt-5.5"
32 32
33# Communication style for supported models. Allowed values: none | friendly | pragmatic33# Communication style for supported models. Allowed values: none | friendly | pragmatic
34# personality = "pragmatic"34# personality = "pragmatic"
35 35
36# Optional model override for /review. Default: unset (uses current session model).36# Optional model override for /review. Default: unset (uses current session model).
# review_model = "gpt-5.5"
38 38
39# Provider id selected from [model_providers]. Default: "openai".39# Provider id selected from [model_providers]. Default: "openai".
40model_provider = "openai"40model_provider = "openai"
107# - untrusted: only known-safe read-only commands auto-run; others prompt107# - untrusted: only known-safe read-only commands auto-run; others prompt
108# - on-request: model decides when to ask (default)108# - on-request: model decides when to ask (default)
109# - never: never prompt (risky)109# - never: never prompt (risky)
# - { granular = { ... } }: allow or auto-reject selected prompt categories
111approval_policy = "on-request"111approval_policy = "on-request"
# Who reviews eligible approval prompts: user (default) | auto_review
# approvals_reviewer = "user"
114
115# Example granular policy:
116# approval_policy = { granular = {
117# sandbox_approval = true,
118# rules = true,
119# mcp_elicitations = true,
120# request_permissions = false,
121# skill_approval = false
122# } }
114 123
115# Allow login-shell semantics for shell-based tools when they request `login = true`.124# Allow login-shell semantics for shell-based tools when they request `login = true`.
116# Default: true. Set false to force non-login shells and reject explicit login-shell requests.125# Default: true. Set false to force non-login shells and reject explicit login-shell requests.
121# - workspace-write130# - workspace-write
122# - danger-full-access (no sandbox; extremely risky)131# - danger-full-access (no sandbox; extremely risky)
123sandbox_mode = "read-only"132sandbox_mode = "read-only"
133# Named permissions profile to apply by default. Required before using [permissions.<name>].
134# default_permissions = "workspace"
135
136# Example filesystem profile. Use `"none"` to deny reads for exact paths or
137# glob patterns. On platforms that need pre-expanded glob matches, set
138# glob_scan_max_depth when using unbounded patterns such as `**`.
139# [permissions.workspace.filesystem]
140# glob_scan_max_depth = 3
141# ":project_roots" = { "." = "write", "**/*.env" = "none" }
142# "/absolute/path/to/secrets" = "none"
124 143
125################################################################################144################################################################################
126# Authentication & Login145# Authentication & Login
132# Base URL for ChatGPT auth flow (not OpenAI API).151# Base URL for ChatGPT auth flow (not OpenAI API).
133chatgpt_base_url = "https://chatgpt.com/backend-api/"152chatgpt_base_url = "https://chatgpt.com/backend-api/"
134 153
154# Optional base URL override for the built-in OpenAI provider.
155# openai_base_url = "https://us.api.openai.com/v1"
156
135# Restrict ChatGPT login to a specific workspace id. Default: unset.157# Restrict ChatGPT login to a specific workspace id. Default: unset.
136# forced_chatgpt_workspace_id = "00000000-0000-0000-0000-000000000000"158# forced_chatgpt_workspace_id = "00000000-0000-0000-0000-000000000000"
137 159
265# Managed network proxy settings287# Managed network proxy settings
266################################################################################288################################################################################
267 289
# Set `default_permissions = "workspace"` before enabling this profile.
291# [permissions.workspace.network]
269# enabled = true292# enabled = true
270# proxy_url = "http://127.0.0.1:43128"293# proxy_url = "http://127.0.0.1:43128"
271# admin_url = "http://127.0.0.1:43129"294# admin_url = "http://127.0.0.1:43129"
277# dangerously_allow_non_loopback_admin = false300# dangerously_allow_non_loopback_admin = false
278# dangerously_allow_all_unix_sockets = false301# dangerously_allow_all_unix_sockets = false
279# mode = "limited" # limited | full302# mode = "limited" # limited | full
280# allowed_domains = ["api.openai.com"]
281# denied_domains = ["example.com"]
282# allow_unix_sockets = ["/var/run/docker.sock"]
283# allow_local_binding = false303# allow_local_binding = false
304#
305# [permissions.workspace.network.domains]
306# "api.openai.com" = "allow"
307# "example.com" = "deny"
308#
309# [permissions.workspace.network.unix_sockets]
310# "/var/run/docker.sock" = "allow"
284 311
285################################################################################312################################################################################
286# History (table)313# History (table)
304# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"331# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"
305# notification_method = "auto"332# notification_method = "auto"
306 333
334# When notifications fire: unfocused (default) | always
335# notification_condition = "unfocused"
336
307# Enables welcome/status/spinner animations. Default: true337# Enables welcome/status/spinner animations. Default: true
308animations = true338animations = true
309 339
318# Set to [] to hide the footer.348# Set to [] to hide the footer.
319# status_line = ["model", "context-remaining", "git-branch"]349# status_line = ["model", "context-remaining", "git-branch"]
320 350
351# Ordered list of terminal window/tab title item IDs. When unset, Codex uses:
352# ["spinner", "project"]. Set to [] to clear the title.
353# Available IDs include app-name, project, spinner, status, thread, git-branch, model,
354# and task-progress.
355# terminal_title = ["spinner", "project"]
356
321# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.357# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.
322# You can also add custom .tmTheme files under $CODEX_HOME/themes.358# You can also add custom .tmTheme files under $CODEX_HOME/themes.
323# theme = "catppuccin-mocha"359# theme = "catppuccin-mocha"
341# hide_rate_limit_model_nudge = true377# hide_rate_limit_model_nudge = true
342# hide_gpt5_1_migration_prompt = true378# hide_gpt5_1_migration_prompt = true
343# "hide_gpt-5.1-codex-max_migration_prompt" = true379# "hide_gpt-5.1-codex-max_migration_prompt" = true
# model_migrations = { "gpt-5.3-codex" = "gpt-5.4" }
345 381
346################################################################################382################################################################################
347# Centralized Feature Flags (preferred)383# Centralized Feature Flags (preferred)
351# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.387# Leave this table empty to accept defaults. Set explicit booleans to opt in/out.
352# shell_tool = true388# shell_tool = true
353# apps = false389# apps = false
# codex_hooks = false
# unified_exec = true
# shell_snapshot = true
# multi_agent = true
358# personality = true394# personality = true
359# use_linux_sandbox_bwrap = false
360# runtime_metrics = true
361# powershell_utf8 = true
362# child_agents_md = false
363# sqlite = true
364# fast_mode = true395# fast_mode = true
365# enable_request_compression = true396# enable_request_compression = true
366# image_generation = false
367# skill_mcp_dependency_install = true397# skill_mcp_dependency_install = true
368# skill_env_var_dependency_prompt = false
369# default_mode_request_user_input = false
370# artifact = false
371# prevent_idle_sleep = false398# prevent_idle_sleep = false

################################################################################
# Memories (table)
402################################################################################
403
404# Enable memories with [features].memories, then tune memory behavior here.
405# [memories]
406# generate_memories = true
407# use_memories = true
408# disable_on_external_context = false # legacy alias: no_memories_if_mcp_or_web_search
409
410################################################################################
411# Lifecycle hooks can be configured here inline or in a sibling hooks.json.
412################################################################################
413
414# [hooks]
415# [[hooks.PreToolUse]]
416# matcher = "^Bash$"
417#
418# [[hooks.PreToolUse.hooks]]
419# type = "command"
420# command = 'python3 "/absolute/path/to/pre_tool_use_policy.py"'
421# timeout = 30
422# statusMessage = "Checking Bash command"
375 423
376################################################################################424################################################################################
377# Define MCP servers under this table. Leave empty to disable.425# Define MCP servers under this table. Leave empty to disable.
386# command = "docs-server" # required434# command = "docs-server" # required
387# args = ["--port", "4000"] # optional435# args = ["--port", "4000"] # optional
388# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is436# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is
# env_vars = ["ANOTHER_SECRET"]   # optional: forward local parent env vars
438# env_vars = ["LOCAL_TOKEN", { name = "REMOTE_TOKEN", source = "remote" }]
390# cwd = "/path/to/server" # optional working directory override439# cwd = "/path/to/server" # optional working directory override
440# experimental_environment = "remote" # experimental: run stdio via a remote executor
391# startup_timeout_sec = 10.0 # optional; default 10.0 seconds441# startup_timeout_sec = 10.0 # optional; default 10.0 seconds
392# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)442# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)
393# tool_timeout_sec = 60.0 # optional; default 60.0 seconds443# tool_timeout_sec = 60.0 # optional; default 60.0 seconds
418# - openai468# - openai
419# - ollama469# - ollama
420# - lmstudio470# - lmstudio
471# These IDs are reserved. Use a different ID for custom providers.
421 472
422[model_providers]473[model_providers]
423 474
426# name = "OpenAI Data Residency"477# name = "OpenAI Data Residency"
427# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix478# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix
428# wire_api = "responses" # only supported value479# wire_api = "responses" # only supported value
# # requires_openai_auth = true   # use only for providers backed by OpenAI auth
430# # request_max_retries = 4 # default 4; max 100481# # request_max_retries = 4 # default 4; max 100
431# # stream_max_retries = 5 # default 5; max 100482# # stream_max_retries = 5 # default 5; max 100
432# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)483# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)
445# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"496# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"
446# # supports_websockets = false497# # supports_websockets = false
447 498
499# --- Example: command-backed bearer token auth ---
500# [model_providers.proxy]
501# name = "OpenAI using LLM proxy"
502# base_url = "https://proxy.example.com/v1"
503# wire_api = "responses"
504#
505# [model_providers.proxy.auth]
506# command = "/usr/local/bin/fetch-codex-token"
507# args = ["--audience", "codex"]
508# timeout_ms = 5000
509# refresh_interval_ms = 300000
510
448# --- Example: Local OSS (e.g., Ollama-compatible) ---511# --- Example: Local OSS (e.g., Ollama-compatible) ---
# [model_providers.local_ollama]
450# name = "Ollama"513# name = "Ollama"
451# base_url = "http://localhost:11434/v1"514# base_url = "http://localhost:11434/v1"
452# wire_api = "responses"515# wire_api = "responses"
473# enabled = false536# enabled = false
474# approval_mode = "approve"537# approval_mode = "approve"
475 538
539# Optional tool suggestion allowlist for connectors or plugins Codex can offer to install.
540# [tool_suggest]
541# discoverables = [
542# { type = "connector", id = "gmail" },
543# { type = "plugin", id = "figma@openai-curated" },
544# ]
545
476################################################################################546################################################################################
477# Profiles (named presets)547# Profiles (named presets)
478################################################################################548################################################################################