config-sample.md +69 −11
27# Core Model Selection27# Core Model Selection
28################################################################################28################################################################################
29 29
30# Primary model used by Codex. Recommended example for most users: "gpt-5.4".30# Primary model used by Codex. Recommended example for most users: "gpt-5.5".
31model = "gpt-5.4"31model = "gpt-5.5"
32 32
33# Communication style for supported models. Allowed values: none | friendly | pragmatic33# Communication style for supported models. Allowed values: none | friendly | pragmatic
34# personality = "pragmatic"34# personality = "pragmatic"
35 35
36# Optional model override for /review. Default: unset (uses current session model).36# Optional model override for /review. Default: unset (uses current session model).
37# review_model = "gpt-5.4"37# review_model = "gpt-5.5"
38 38
39# Provider id selected from [model_providers]. Default: "openai".39# Provider id selected from [model_providers]. Default: "openai".
40model_provider = "openai"40model_provider = "openai"
109# - never: never prompt (risky)109# - never: never prompt (risky)
110# - { granular = { ... } }: allow or auto-reject selected prompt categories110# - { granular = { ... } }: allow or auto-reject selected prompt categories
111approval_policy = "on-request"111approval_policy = "on-request"
112# Who reviews eligible approval prompts: user (default) | auto_review
113# approvals_reviewer = "user"
114
112# Example granular policy:115# Example granular policy:
113# approval_policy = { granular = {116# approval_policy = { granular = {
114# sandbox_approval = true,117# sandbox_approval = true,
127# - workspace-write130# - workspace-write
128# - danger-full-access (no sandbox; extremely risky)131# - danger-full-access (no sandbox; extremely risky)
129sandbox_mode = "read-only"132sandbox_mode = "read-only"
133# Named permissions profile to apply by default. Required before using [permissions.<name>].
134# default_permissions = "workspace"
135
136# Example filesystem profile. Use `"none"` to deny reads for exact paths or
137# glob patterns. On platforms that need pre-expanded glob matches, set
138# glob_scan_max_depth when using unbounded patterns such as `**`.
139# [permissions.workspace.filesystem]
140# glob_scan_max_depth = 3
141# ":project_roots" = { "." = "write", "**/*.env" = "none" }
142# "/absolute/path/to/secrets" = "none"
130 143
131################################################################################144################################################################################
132# Authentication & Login145# Authentication & Login
274# Managed network proxy settings287# Managed network proxy settings
275################################################################################288################################################################################
276 289
277[permissions.network]290# Set `default_permissions = "workspace"` before enabling this profile.
291# [permissions.workspace.network]
278# enabled = true292# enabled = true
279# proxy_url = "http://127.0.0.1:43128"293# proxy_url = "http://127.0.0.1:43128"
280# admin_url = "http://127.0.0.1:43129"294# admin_url = "http://127.0.0.1:43129"
286# dangerously_allow_non_loopback_admin = false300# dangerously_allow_non_loopback_admin = false
287# dangerously_allow_all_unix_sockets = false301# dangerously_allow_all_unix_sockets = false
288# mode = "limited" # limited | full302# mode = "limited" # limited | full
289# allowed_domains = ["api.openai.com"]
290# denied_domains = ["example.com"]
291# allow_unix_sockets = ["/var/run/docker.sock"]
292# allow_local_binding = false303# allow_local_binding = false
304#
305# [permissions.workspace.network.domains]
306# "api.openai.com" = "allow"
307# "example.com" = "deny"
308#
309# [permissions.workspace.network.unix_sockets]
310# "/var/run/docker.sock" = "allow"
293 311
294################################################################################312################################################################################
295# History (table)313# History (table)
313# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"331# Notification mechanism for terminal alerts: auto | osc9 | bel. Default: "auto"
314# notification_method = "auto"332# notification_method = "auto"
315 333
334# When notifications fire: unfocused (default) | always
335# notification_condition = "unfocused"
336
316# Enables welcome/status/spinner animations. Default: true337# Enables welcome/status/spinner animations. Default: true
317animations = true338animations = true
318 339
327# Set to [] to hide the footer.348# Set to [] to hide the footer.
328# status_line = ["model", "context-remaining", "git-branch"]349# status_line = ["model", "context-remaining", "git-branch"]
329 350
351# Ordered list of terminal window/tab title item IDs. When unset, Codex uses:
352# ["spinner", "project"]. Set to [] to clear the title.
353# Available IDs include app-name, project, spinner, status, thread, git-branch, model,
354# and task-progress.
355# terminal_title = ["spinner", "project"]
356
330# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.357# Syntax-highlighting theme (kebab-case). Use /theme in the TUI to preview and save.
331# You can also add custom .tmTheme files under $CODEX_HOME/themes.358# You can also add custom .tmTheme files under $CODEX_HOME/themes.
332# theme = "catppuccin-mocha"359# theme = "catppuccin-mocha"
366# multi_agent = true393# multi_agent = true
367# personality = true394# personality = true
368# fast_mode = true395# fast_mode = true
369# smart_approvals = false
370# enable_request_compression = true396# enable_request_compression = true
371# skill_mcp_dependency_install = true397# skill_mcp_dependency_install = true
372# prevent_idle_sleep = false398# prevent_idle_sleep = false
373 399
400################################################################################
401# Memories (table)
402################################################################################
403
404# Enable memories with [features].memories, then tune memory behavior here.
405# [memories]
406# generate_memories = true
407# use_memories = true
408# disable_on_external_context = false # legacy alias: no_memories_if_mcp_or_web_search
409
374################################################################################410################################################################################
375# Define MCP servers under this table. Leave empty to disable.411# Define MCP servers under this table. Leave empty to disable.
376################################################################################412################################################################################
384# command = "docs-server" # required420# command = "docs-server" # required
385# args = ["--port", "4000"] # optional421# args = ["--port", "4000"] # optional
386# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is422# env = { "API_KEY" = "value" } # optional key/value pairs copied as-is
387# env_vars = ["ANOTHER_SECRET"] # optional: forward these from the parent env423# env_vars = ["ANOTHER_SECRET"] # optional: forward local parent env vars
424# env_vars = ["LOCAL_TOKEN", { name = "REMOTE_TOKEN", source = "remote" }]
388# cwd = "/path/to/server" # optional working directory override425# cwd = "/path/to/server" # optional working directory override
426# experimental_environment = "remote" # experimental: run stdio via a remote executor
389# startup_timeout_sec = 10.0 # optional; default 10.0 seconds427# startup_timeout_sec = 10.0 # optional; default 10.0 seconds
390# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)428# # startup_timeout_ms = 10000 # optional alias for startup timeout (milliseconds)
391# tool_timeout_sec = 60.0 # optional; default 60.0 seconds429# tool_timeout_sec = 60.0 # optional; default 60.0 seconds
416# - openai454# - openai
417# - ollama455# - ollama
418# - lmstudio456# - lmstudio
457# These IDs are reserved. Use a different ID for custom providers.
419 458
420[model_providers]459[model_providers]
421 460
424# name = "OpenAI Data Residency"463# name = "OpenAI Data Residency"
425# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix464# base_url = "https://us.api.openai.com/v1" # example with 'us' domain prefix
426# wire_api = "responses" # only supported value465# wire_api = "responses" # only supported value
427# # requires_openai_auth = true # built-in OpenAI defaults to true466# # requires_openai_auth = true # use only for providers backed by OpenAI auth
428# # request_max_retries = 4 # default 4; max 100467# # request_max_retries = 4 # default 4; max 100
429# # stream_max_retries = 5 # default 5; max 100468# # stream_max_retries = 5 # default 5; max 100
430# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)469# # stream_idle_timeout_ms = 300000 # default 300_000 (5m)
443# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"482# env_key_instructions = "Set AZURE_OPENAI_API_KEY in your environment"
444# # supports_websockets = false483# # supports_websockets = false
445 484
485# --- Example: command-backed bearer token auth ---
486# [model_providers.proxy]
487# name = "OpenAI using LLM proxy"
488# base_url = "https://proxy.example.com/v1"
489# wire_api = "responses"
490#
491# [model_providers.proxy.auth]
492# command = "/usr/local/bin/fetch-codex-token"
493# args = ["--audience", "codex"]
494# timeout_ms = 5000
495# refresh_interval_ms = 300000
496
446# --- Example: Local OSS (e.g., Ollama-compatible) ---497# --- Example: Local OSS (e.g., Ollama-compatible) ---
447# [model_providers.ollama]498# [model_providers.local_ollama]
448# name = "Ollama"499# name = "Ollama"
449# base_url = "http://localhost:11434/v1"500# base_url = "http://localhost:11434/v1"
450# wire_api = "responses"501# wire_api = "responses"
471# enabled = false522# enabled = false
472# approval_mode = "approve"523# approval_mode = "approve"
473 524
525# Optional tool suggestion allowlist for connectors or plugins Codex can offer to install.
526# [tool_suggest]
527# discoverables = [
528# { type = "connector", id = "gmail" },
529# { type = "plugin", id = "figma@openai-curated" },
530# ]
531
474################################################################################532################################################################################
475# Profiles (named presets)533# Profiles (named presets)
476################################################################################534################################################################################