File containers.conf of Package libcontainers-common (Project SUSE:SLE-15-SP1:Update)
# The containers configuration file specifies all of the available configuration
# command-line options/flags for container engine tools like Podman & Buildah,
# but in a TOML format that can be easily modified and versioned.
#
# Please refer to containers.conf(5) for details of all configuration options.
# Not all container engines implement all of the options.
# All of the options have hard-coded defaults, and these options will override
# the built-in defaults. Users can then override these options via the command
# line. Container engines will read containers.conf files in up to three
# locations, in the following order:
#  1. /usr/share/containers/containers.conf
#  2. /etc/containers/containers.conf
#  3. $HOME/.config/containers/containers.conf (rootless containers ONLY)
# Items specified in the latter containers.conf, if they exist, override the
# previous containers.conf settings, or the default settings.

[containers]

# List of devices. Specified as
# "<device-on-host>:<device-on-container>:<permissions>", for example:
# "/dev/sdc:/dev/xvdc:rwm".
# If it is empty or commented out, only the default devices will be used.
#
# devices = []

# List of volumes. Specified as
# "<directory-on-host>:<directory-in-container>:<options>", for example:
# "/db:/var/lib/db:ro".
# If it is empty or commented out, no volumes will be added.
#
# volumes = []

# Used to change the name of the default AppArmor profile of the container engine.
#
# apparmor_profile = "container-default"

# List of annotations. Specified as
# "key=value".
# If it is empty or commented out, no annotations will be added.
#
# annotations = []

# Default way to create a cgroup namespace for the container.
# Options are:
#   `private` Create a private cgroup namespace for the container.
#   `host`    Share the host cgroup namespace with the container.
#
# cgroupns = "private"

# Control container cgroup configuration.
# Determines whether the container will create cgroups.
# Options are:
#   `enabled`   Enable cgroup support within the container.
#   `disabled`  Disable cgroup support; cgroups will be inherited from the parent.
#   `no-conmon` Do not create a cgroup dedicated to conmon.
#
# cgroups = "enabled"

# List of default capabilities for containers. If it is empty or commented out,
# the default capabilities defined in the container engine will be added.
#
# default_capabilities = [
#   "AUDIT_WRITE",
#   "CHOWN",
#   "DAC_OVERRIDE",
#   "FOWNER",
#   "FSETID",
#   "KILL",
#   "MKNOD",
#   "NET_BIND_SERVICE",
#   "NET_RAW",
#   "SETGID",
#   "SETPCAP",
#   "SETUID",
#   "SYS_CHROOT",
# ]

# A list of sysctls to be set in containers by default,
# specified as "name=value",
# for example: "net.ipv4.ping_group_range = 0 0".
#
# default_sysctls = [
#   "net.ipv4.ping_group_range=0 0",
# ]

# A list of ulimits to be set in containers by default, specified as
# "<ulimit name>=<soft limit>:<hard limit>", for example:
# "nofile=1024:2048".
# See setrlimit(2) for a list of resource names.
# Any limit not specified here will be inherited from the process launching the
# container engine.
# Ulimits have limits for non-privileged container engines.
#
# default_ulimits = [
#   "nofile=1280:2560",
# ]

# List of default DNS options to be added to /etc/resolv.conf inside of the container.
#
# dns_options = []

# List of default DNS search domains to be added to /etc/resolv.conf inside of the container.
#
# dns_searches = []

# Set default DNS servers.
# This option can be used to override the DNS configuration passed to the
# container.
# The special value "none" can be specified to disable creation of
# /etc/resolv.conf in the container.
# The /etc/resolv.conf file in the image will be used without changes.
#
# dns_servers = []

# Environment variable list for the conmon process; used for passing necessary
# environment variables to conmon or the runtime.
#
# env = [
#   "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
#   "TERM=xterm",
# ]

# Pass all host environment variables into the container.
#
# env_host = false

# Default proxy environment variables passed into the container.
# The environment variables passed in include:
# http_proxy, https_proxy, ftp_proxy, no_proxy, and the upper case versions of
# these. This option is needed when the host system uses a proxy but the
# container should not use the proxy. Proxy environment variables specified
# for the container in any other way will override the values passed from the host.
#
# http_proxy = true

# Run an init inside the container that forwards signals and reaps processes.
#
# init = false

# Container init binary: if init=true, this is the init binary to be used for containers.
#
# init_path = "/usr/bin/catatonit"

# Default way to create an IPC namespace (POSIX SysV IPC) for the container.
# Options are:
#   `private` Create a private IPC namespace for the container.
#   `host`    Share the host IPC namespace with the container.
#
# ipcns = "private"

# keyring tells the container engine whether to create
# a kernel keyring for use within the container.
#
# keyring = true

# label tells the container engine whether to use container separation using
# MAC (SELinux) labeling or not.
# The label flag is ignored on label-disabled systems.
#
# label = true

# Logging driver for the container. Available options: k8s-file and journald.
#
# log_driver = "k8s-file"

# Maximum size allowed for the container log file. Negative numbers indicate
# that no size limit is imposed. If positive, it must be >= 8192 to match or
# exceed conmon's read buffer. The file is truncated and re-opened so the
# limit is never exceeded.
#
# log_size_max = -1

# Default way to create a network namespace for the container.
# Options are:
#   `private` Create a private network namespace for the container.
#   `host`    Share the host network namespace with the container.
#   `none`    Containers do not use the network.
#
# netns = "private"

# Create /etc/hosts for the container. By default, container engines manage
# /etc/hosts, automatically adding the container's own IP address.
#
# no_hosts = false

# Maximum number of processes allowed in a container.
#
# pids_limit = 2048

# Default way to create a PID namespace for the container.
# Options are:
#   `private` Create a private PID namespace for the container.
#   `host`    Share the host PID namespace with the container.
#
# pidns = "private"

# Path to the seccomp.json profile which is used as the default seccomp profile
# for the runtime.
#
# seccomp_profile = "/usr/share/containers/seccomp.json"

# Size of /dev/shm. Specified as <number><unit>.
# Unit is optional; values: b (bytes), k (kilobytes), m (megabytes), or g (gigabytes).
# If the unit is omitted, the system uses bytes.
#
# shm_size = "65536k"

# Set timezone in container. Takes IANA timezones as well as "local",
# which sets the timezone in the container to match the host machine.
#
# tz = ""

# Set umask inside the container.
#
# umask = "0022"

# Default way to create a UTS namespace for the container.
# Options are:
#   `private` Create a private UTS namespace for the container.
#   `host`    Share the host UTS namespace with the container.
#
# utsns = "private"

# Default way to create a user namespace for the container.
# Options are:
#   `auto` Create a unique user namespace for the container.
#   `host` Share the host user namespace with the container.
#
# userns = "host"

# Number of UIDs to allocate for automatic container creation.
# UIDs are allocated from the "container" UIDs listed in
# /etc/subuid & /etc/subgid.
#
# userns_size = 65536

# The network table contains settings pertaining to the management of
# CNI plugins.

[network]

# Path to the directory where CNI plugin binaries are located.
#
# cni_plugin_dirs = ["@LIBEXECDIR@/cni"]

# Path to the directory where CNI configuration files are located.
#
# network_config_dir = "/etc/cni/net.d/"

[engine]

# ImageBuildFormat indicates the default image format for building
# container images. Valid values are "oci" (default) or "docker".
#
# image_build_format = "oci"

# Cgroup management implementation used for the runtime.
# Valid options are "systemd" or "cgroupfs".
#
# cgroup_manager = "systemd"

# Environment variables to pass into conmon.
#
# conmon_env_vars = [
#   "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# ]

# Paths to look for the conmon container manager binary.
#
# conmon_path = [
#   "/usr/libexec/podman/conmon",
#   "/usr/local/libexec/podman/conmon",
#   "/usr/local/lib/podman/conmon",
#   "/usr/bin/conmon",
#   "/usr/sbin/conmon",
#   "/usr/local/bin/conmon",
#   "/usr/local/sbin/conmon"
# ]

# Specify the keys sequence used to detach a container.
# Format is a single character [a-Z] or a comma-separated sequence of
# `ctrl-<value>`, where `<value>` is one of:
# `a-z`, `@`, `^`, `[`, `\`, `]`, `^` or `_`
#
# detach_keys = "ctrl-p,ctrl-q"

# Determines whether the engine will reserve ports on the host when they are
# forwarded to containers. When enabled, ports forwarded to containers are
# held open for as long as the container is running, ensuring that they cannot
# be reused by other programs on the host. However, this can cause significant
# memory usage if a container has many ports forwarded to it.
# Disabling this can save memory.
#
# enable_port_reservation = true

# Environment variables to be used when running the container engine (e.g., Podman, Buildah).
# For example "http_proxy=internal.proxy.company.com".
# Note these environment variables will not be used within the container.
# Set the env section under the [containers] table if you want to set environment variables for the container.
#
# env = []

# Selects which logging mechanism to use for container engine events.
# Valid values are `journald`, `file` and `none`.
#
# events_logger = "journald"

# Path to OCI hooks directories for automatically executed hooks.
#
# hooks_dir = [
#   "/usr/share/containers/oci/hooks.d",
# ]

# Default transport method for pulling and pushing images.
#
# image_default_transport = "docker://"

# Default command to run the infra container.
#
# infra_command = "/pause"

# Infra (pause) container image name for pod infra containers. When running a
# pod, we start a `pause` process in a container to hold open the namespaces
# associated with the pod. This container does nothing other than sleep,
# reserving the pod's resources for the lifetime of the pod.
#
# infra_image = "k8s.gcr.io/pause:3.2"

# Specify the locking mechanism to use; valid values are "shm" and "file".
# Change the default only if you are sure of what you are doing; in general,
# "file" is useful only on platforms where cgo is not available for using the
# faster "shm" lock type.
# You may need to run "podman system renumber" after
# you change the lock type.
#
# lock_type = "shm"

# MultiImageArchive - if true, the container engine allows for storing archives
# (e.g., of the docker-archive transport) with multiple images. By default,
# Podman creates single-image archives.
#
# multi_image_archive = "false"

# Default engine namespace.
# If the engine is joined to a namespace, it will see only containers and pods
# that were created in the same namespace, and will create new containers and
# pods in that namespace.
# The default namespace is "", which corresponds to no namespace. When no
# namespace is set, all containers and pods are visible.
#
# namespace = ""

# Path to the slirp4netns binary.
#
# network_cmd_path = ""

# Default options to pass to the slirp4netns binary.
# For example "allow_host_loopback=true".
#
# network_cmd_options = []

# Whether to use chroot instead of pivot_root in the runtime.
#
# no_pivot_root = false

# Number of locks available for containers and pods.
# If this is changed, a lock renumber must be performed (e.g. with the
# 'podman system renumber' command).
#
# num_locks = 2048

# Whether to pull a new image before running a container.
#
# pull_policy = "missing"

# Indicates whether the application should be running in remote mode. This flag modifies the
# --remote option on container engines. Setting the flag to true will default
# `podman --remote=true` for access to the remote Podman service.
#
# remote = false

# Directory for persistent engine files (database, etc.).
# By default, this will be configured relative to where containers/storage
# stores containers.
# Uncomment to change location from this default.
#
# static_dir = "/var/lib/containers/storage/libpod"

# Directory for temporary files. Must be a tmpfs (wiped after reboot).
#
# tmp_dir = "/var/run/libpod"

# Directory for libpod named volumes.
# By default, this will be configured relative to where containers/storage
# stores containers.
# Uncomment to change location from this default.
#
# volume_path = "/var/lib/containers/storage/volumes"

# Default OCI runtime.
#
# runtime = "runc"

# List of the OCI runtimes that support --format=json. When json is supported,
# the engine will use it for reporting nicer errors.
#
# runtime_supports_json = ["crun", "runc", "kata"]

# List of the OCI runtimes that support running containers without cgroups.
#
# runtime_supports_nocgroups = ["crun"]

# List of the OCI runtimes that support running containers with KVM separation.
#
# runtime_supports_kvm = ["kata"]

# Number of seconds to wait for a container to exit before sending the kill signal.
# stop_timeout = 10

# Index to the active service.
#
# active_service = production

# Map of service destinations.
#
# [service_destinations]
# [service_destinations.production]
# URI to access the Podman service.
# Examples:
#   rootless "unix://run/user/$UID/podman/podman.sock" (Default)
#   rootfull "unix://run/podman/podman.sock" (Default)
#   remote rootless ssh://engineering.lab.company.com/run/user/1000/podman/podman.sock
#   remote rootfull ssh://root@10.10.1.136:22/run/podman/podman.sock
# uri = "ssh://user@production.example.com/run/user/1001/podman/podman.sock"
# Path to file containing the ssh identity key.
# identity = "~/.ssh/id_rsa"

# Paths to look for a valid OCI runtime (crun, runc, kata, etc.)

[engine.runtimes]

# runc = [
#   "/usr/bin/runc",
#   "/usr/sbin/runc",
#   "/usr/local/bin/runc",
#   "/usr/local/sbin/runc",
#   "/sbin/runc",
#   "/bin/runc",
#   "/usr/lib/cri-o-runc/sbin/runc",
# ]

# crun = [
#   "/usr/bin/crun",
#   "/usr/sbin/crun",
#   "/usr/local/bin/crun",
#   "/usr/local/sbin/crun",
#   "/sbin/crun",
#   "/bin/crun",
#   "/run/current-system/sw/bin/crun",
# ]

# kata = [
#   "/usr/bin/kata-runtime",
#   "/usr/sbin/kata-runtime",
#   "/usr/local/bin/kata-runtime",
#   "/usr/local/sbin/kata-runtime",
#   "/sbin/kata-runtime",
#   "/bin/kata-runtime",
#   "/usr/bin/kata-qemu",
#   "/usr/bin/kata-fc",
# ]

[engine.volume_plugins]

# testplugin = "/run/podman/plugins/test.sock"

# The [engine.volume_plugins] table MUST be the last entry in this file.
# (Unless another table is added.)
# TOML does not provide a way to end a table other than a further table being
# defined, so every key hereafter will be part of [volume_plugins] and not the
# main config.
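For illustration only, and not part of the packaged file: given the three-location search order described in the header above, a rootless user can override individual defaults by creating $HOME/.config/containers/containers.conf containing only the keys to change. The values below (journald logging, a lower pids_limit, an extra read-only volume, and crun as runtime) are hypothetical examples, not project defaults; any key left out keeps its value from the system-wide files or the built-in defaults.

    # Hypothetical rootless override: $HOME/.config/containers/containers.conf
    [containers]
    # Example values only; adjust or remove as needed.
    log_driver = "journald"
    pids_limit = 1024
    volumes = [
      "/srv/data:/data:ro",
    ]

    [engine]
    # Assumes crun is installed; the commented-out default above is "runc".
    runtime = "crun"

Because this per-user file is read last, its settings apply only to that user's rootless containers and take precedence over /usr/share/containers/containers.conf and /etc/containers/containers.conf.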