From 1a9dd82874b52bc44429254ee8b352fa6b94d7cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Dec 2024 02:18:14 +0000 Subject: [PATCH] chore(deps): bump github.com/docker/docker Bumps [github.com/docker/docker](https://github.com/docker/docker) from 27.0.3+incompatible to 27.4.1+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v27.0.3...v27.4.1) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 3 +- go.sum | 6 +- vendor/github.com/docker/docker/api/common.go | 2 +- .../github.com/docker/docker/api/swagger.yaml | 225 +++++-- .../docker/api/types/container/hostconfig.go | 5 +- .../docker/docker/api/types/filters/parse.go | 2 +- .../docker/docker/api/types/image/manifest.go | 99 +++ .../docker/docker/api/types/image/opts.go | 3 + .../docker/docker/api/types/image/summary.go | 15 +- .../docker/api/types/registry/authconfig.go | 14 +- .../docker/docker/api/types/swarm/swarm.go | 2 +- .../docker/docker/api/types/system/info.go | 7 - .../docker/docker/api/types/types.go | 2 + .../docker/api/types/volume/cluster_volume.go | 2 +- .../docker/builder/dockerfile/builder.go | 4 +- .../docker/builder/dockerfile/dispatchers.go | 6 +- .../docker/builder/dockerfile/evaluator.go | 2 +- .../docker/builder/dockerfile/imagecontext.go | 2 +- .../docker/builder/dockerfile/internals.go | 2 +- .../builder/dockerfile/internals_linux.go | 10 +- .../builder/dockerfile/internals_windows.go | 2 +- .../github.com/docker/docker/client/client.go | 10 +- .../docker/docker/client/image_list.go | 8 + .../github.com/docker/docker/client/ping.go | 4 +- .../docker/docker/container/container.go | 19 +- .../docker/docker/container/stream/streams.go | 13 +- .../docker/docker/container/view.go | 22 +- .../docker/docker/daemon/logger/adapter.go | 8 +- .../docker/daemon/logger/jsonfilelog/read.go | 8 +- .../docker/docker/daemon/logger/local/read.go | 6 +- .../docker/docker/daemon/logger/logger.go | 3 +- .../logger/loggerutils/cache/local_cache.go | 6 +- .../daemon/logger/loggerutils/follow.go | 18 +- .../daemon/logger/loggerutils/logfile.go | 566 +++++++++++++----- .../daemon/logger/loggerutils/sharedtemp.go | 2 +- .../docker/daemon/logger/plugin_unix.go | 2 +- .../docker/docker/daemon/logger/ring.go | 10 +- .../docker/internal/sliceutil/sliceutil.go | 2 +- .../docker/docker/layer/filestore.go | 33 +- .../github.com/docker/docker/layer/layer.go | 6 +- .../docker/docker/oci/caps/utils.go | 2 +- .../docker/pkg/archive/archive_linux.go | 2 +- .../docker/pkg/chrootarchive/archive.go | 2 +- .../docker/pkg/chrootarchive/diff_unix.go | 2 +- .../docker/pkg/containerfs/containerfs.go | 15 - .../docker/docker/pkg/containerfs/rm.go | 78 --- .../docker/pkg/containerfs/rm_windows.go | 6 - .../docker/pkg/jsonmessage/jsonmessage.go | 2 +- .../docker/docker/pkg/plugins/discovery.go | 2 +- .../docker/docker/pkg/plugins/plugins.go | 1 - .../docker/docker/pkg/pools/pools.go | 2 +- .../docker/docker/pkg/system/lstat_unix.go | 4 +- .../docker/docker/pkg/system/lstat_windows.go | 2 + .../docker/docker/pkg/system/mknod.go | 2 + .../docker/docker/pkg/system/mknod_freebsd.go | 2 + .../docker/docker/pkg/system/mknod_unix.go | 2 + .../docker/docker/pkg/system/stat_linux.go | 2 + .../docker/docker/pkg/system/stat_unix.go | 6 +- .../docker/docker/pkg/system/stat_windows.go | 6 
+- .../docker/docker/pkg/system/xattrs_linux.go | 2 +- .../docker/docker/pkg/tailfile/tailfile.go | 8 +- .../docker/docker/plugin/v2/plugin.go | 2 + .../docker/docker/plugin/v2/plugin_linux.go | 4 +- .../docker/volume/mounts/linux_parser.go | 6 +- .../docker/volume/mounts/windows_parser.go | 6 +- vendor/github.com/moby/sys/userns/LICENSE | 202 +++++++ vendor/github.com/moby/sys/userns/userns.go | 16 + .../moby/sys/userns/userns_linux.go | 53 ++ .../moby/sys/userns/userns_linux_fuzzer.go | 8 + .../moby/sys/userns/userns_unsupported.go | 6 + vendor/modules.txt | 6 +- 71 files changed, 1202 insertions(+), 418 deletions(-) create mode 100644 vendor/github.com/docker/docker/api/types/image/manifest.go delete mode 100644 vendor/github.com/docker/docker/pkg/containerfs/containerfs.go delete mode 100644 vendor/github.com/docker/docker/pkg/containerfs/rm.go delete mode 100644 vendor/github.com/docker/docker/pkg/containerfs/rm_windows.go create mode 100644 vendor/github.com/moby/sys/userns/LICENSE create mode 100644 vendor/github.com/moby/sys/userns/userns.go create mode 100644 vendor/github.com/moby/sys/userns/userns_linux.go create mode 100644 vendor/github.com/moby/sys/userns/userns_linux_fuzzer.go create mode 100644 vendor/github.com/moby/sys/userns/userns_unsupported.go diff --git a/go.mod b/go.mod index d38d66d997..5a81e784ee 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240419161514-af205d85bb44 github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 github.com/containerd/cgroups v1.1.0 // indirect - github.com/docker/docker v27.0.3+incompatible + github.com/docker/docker v27.4.1+incompatible github.com/go-git/go-billy/v5 v5.5.0 github.com/go-git/go-git/v5 v5.12.0 github.com/golang/mock v1.6.0 @@ -171,6 +171,7 @@ require ( github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/swarmkit/v2 v2.0.0-20230315203717-e28e8ba9bc83 // indirect github.com/moby/sys/user v0.1.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect github.com/pelletier/go-toml/v2 v2.2.1 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect diff --git a/go.sum b/go.sum index 8755d68981..55d9465817 100644 --- a/go.sum +++ b/go.sum @@ -184,8 +184,8 @@ github.com/docker/cli v26.1.4+incompatible h1:I8PHdc0MtxEADqYJZvhBrW9bo8gawKwwen github.com/docker/cli v26.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= -github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4= +github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -375,6 +375,8 @@ github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZ github.com/moby/sys/symlink v0.2.0/go.mod 
h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index f831735f84..93d64cd8d5 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of the current REST API. - DefaultVersion = "1.46" + DefaultVersion = "1.47" // MinSupportedAPIVersion is the minimum API version that can be supported // by the API server, specified as "major.minor". Note that the daemon diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index cc754bf1fd..f519806cd4 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.46" +basePath: "/v1.47" info: title: "Docker Engine API" - version: "1.46" + version: "1.47" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.46) is used. - For example, calling `/info` is the same as calling `/v1.46/info`. Using the + If you omit the version-prefix, the current version of the API (v1.47) is used. + For example, calling `/info` is the same as calling `/v1.47/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -393,7 +393,7 @@ definitions: Make the mount non-recursively read-only, but still leave the mount recursive (unless NonRecursive is set to `true` in conjunction). - Addded in v1.44, before that version all read-only mounts were + Added in v1.44, before that version all read-only mounts were non-recursive by default. To match the previous behaviour this will default to `true` for clients on versions prior to v1.44. type: "boolean" @@ -1384,7 +1384,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. + > always empty. It must not be used, and will be removed in API v1.48. type: "string" example: "" Domainname: @@ -1394,7 +1394,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. + > always empty. It must not be used, and will be removed in API v1.48. type: "string" example: "" User: @@ -1408,7 +1408,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1419,7 +1419,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1430,7 +1430,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1457,7 +1457,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1468,7 +1468,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1479,7 +1479,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.47. + > always false. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1516,7 +1516,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.47. + > always empty. It must not be used, and will be removed in API v1.48. type: "string" default: "" example: "" @@ -1555,7 +1555,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. + > always omitted. It must not be used, and will be removed in API v1.48. type: "boolean" default: false example: false @@ -1567,7 +1567,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. + > always omitted. It must not be used, and will be removed in API v1.48. type: "string" default: "" example: "" @@ -1601,7 +1601,7 @@ definitions:


> **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.47. + > always omitted. It must not be used, and will be removed in API v1.48. type: "integer" default: 10 x-nullable: true @@ -2216,7 +2216,7 @@ definitions: Created: description: | Date and time at which the image was created as a Unix timestamp - (number of seconds sinds EPOCH). + (number of seconds since EPOCH). type: "integer" x-nullable: false example: "1644009612" @@ -2265,6 +2265,19 @@ definitions: x-nullable: false type: "integer" example: 2 + Manifests: + description: | + Manifests is a list of manifests available in this image. + It provides a more detailed view of the platform-specific image manifests + or other image-attached data like build attestations. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: false + x-omitempty: true + items: + $ref: "#/definitions/ImageManifestSummary" AuthConfig: type: "object" @@ -2500,7 +2513,7 @@ definitions: example: false Attachable: description: | - Wheter a global / swarm scope network is manually attachable by regular + Whether a global / swarm scope network is manually attachable by regular containers from workers in swarm mode. type: "boolean" default: false @@ -3723,7 +3736,7 @@ definitions: example: "json-file" Options: description: | - Driver-specific options for the selectd log driver, specified + Driver-specific options for the selected log driver, specified as key/value pairs. type: "object" additionalProperties: @@ -5318,7 +5331,7 @@ definitions: description: | The default (and highest) API version that is supported by the daemon type: "string" - example: "1.46" + example: "1.47" MinAPIVersion: description: | The minimum API version that is supported by the daemon @@ -5334,7 +5347,7 @@ definitions: The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" - example: "go1.21.11" + example: "go1.22.7" Os: description: | The operating system that the daemon is running on ("linux" or "windows") @@ -5807,8 +5820,6 @@ definitions: type: "string" example: - "WARNING: No memory limit support" - - "WARNING: bridge-nf-call-iptables is disabled" - - "WARNING: bridge-nf-call-ip6tables is disabled" CDISpecDirs: description: | List of directories where (Container Device Interface) CDI @@ -5830,13 +5841,13 @@ definitions: - "/var/run/cdi" Containerd: $ref: "#/definitions/ContainerdInfo" - x-nullable: true ContainerdInfo: description: | Information for connecting to the containerd instance that is used by the daemon. This is included for debugging purposes only. type: "object" + x-nullable: true properties: Address: description: "The address of the containerd socket." @@ -6644,6 +6655,120 @@ definitions: additionalProperties: type: "string" + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. + type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. 
+ type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This equal to the sum of [Content] size AND all the sizes in the + [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (e.g. image config, layers) + referenced by this manifest and its children. + This only includes blobs in the content store. + type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest. + + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Containers: + description: | + The IDs of the containers that are using this image. + type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content - e.g. + the image might still have an unpacked data that's still used by + some container even when the distributable/compressed content is + already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". + type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. 
+ type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + paths: /containers/json: get: @@ -7585,7 +7710,7 @@ paths: * Memory usage % = `(used_memory / available_memory) * 100.0` * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` - * number_cpus = `lenght(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` operationId: "ContainerStats" produces: ["application/json"] @@ -7749,10 +7874,12 @@ paths: type: "string" - name: "h" in: "query" + required: true description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" + required: true description: "Width of the TTY session in characters" type: "integer" tags: ["Container"] @@ -8622,6 +8749,11 @@ paths: description: "Show digest information as a `RepoDigests` field on each image." type: "boolean" default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false tags: ["Image"] /build: post: @@ -9094,13 +9226,37 @@ paths: parameters: - name: "name" in: "path" - description: "Image name or ID." + description: | + Name of the image to push. For example, `registry.example.com/myimage`. + The image must be present in the local image store with the same name. + + The name should be provided without tag; if a tag is provided, it + is ignored. For example, `registry.example.com/myimage:latest` is + considered equivalent to `registry.example.com/myimage`. + + Use the `tag` parameter to specify the tag to push. type: "string" required: true - name: "tag" in: "query" - description: "The tag to associate with the image on the registry." + description: | + Tag of the image to push. For example, `latest`. If no tag is provided, + all tags of the given image that are present in the local image store + are pushed. + type: "string" + - name: "platform" type: "string" + in: "query" + description: | + JSON-encoded OCI platform to select the platform-variant to push. + If not provided, all available variants will attempt to be pushed. + + If the daemon provides a multi-platform image store, this selects + the platform-variant to push to the registry. If the image is + a single-platform image, or if the multi-platform image does not + provide a variant matching the given platform, an error is returned. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` - name: "X-Registry-Auth" in: "header" description: | @@ -9110,11 +9266,6 @@ paths: details. type: "string" required: true - - name: "platform" - in: "query" - description: "Select a platform-specific manifest to be pushed. 
OCI platform (JSON encoded)" - type: "string" - x-nullable: true tags: ["Image"] /images/{name}/tag: post: @@ -9410,7 +9561,7 @@ paths: type: "string" example: "OK" headers: - API-Version: + Api-Version: type: "string" description: "Max API Version the server supports" Builder-Version: @@ -9466,7 +9617,7 @@ paths: type: "string" example: "(empty)" headers: - API-Version: + Api-Version: type: "string" description: "Max API Version the server supports" Builder-Version: @@ -9563,7 +9714,7 @@ paths: Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` - Images report these events: `create, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` @@ -10060,10 +10211,12 @@ paths: type: "string" - name: "h" in: "query" + required: true description: "Height of the TTY session in characters" type: "integer" - name: "w" in: "query" + required: true description: "Width of the TTY session in characters" type: "integer" tags: ["Exec"] diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go index 727da8839c..03648fb7b5 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -1,6 +1,7 @@ package container // import "github.com/docker/docker/api/types/container" import ( + "errors" "fmt" "strings" @@ -325,12 +326,12 @@ func ValidateRestartPolicy(policy RestartPolicy) error { if policy.MaximumRetryCount < 0 { msg += " and cannot be negative" } - return &errInvalidParameter{fmt.Errorf(msg)} + return &errInvalidParameter{errors.New(msg)} } return nil case RestartPolicyOnFailure: if policy.MaximumRetryCount < 0 { - return &errInvalidParameter{fmt.Errorf("invalid restart policy: maximum retry count cannot be negative")} + return &errInvalidParameter{errors.New("invalid restart policy: maximum retry count cannot be negative")} } return nil case "": diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 0c39ab5f18..0914b2a441 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -196,7 +196,7 @@ func (args Args) Match(field, source string) bool { } // GetBoolOrDefault returns a boolean value of the key if the key is present -// and is intepretable as a boolean value. Otherwise the default value is returned. +// and is interpretable as a boolean value. Otherwise the default value is returned. // Error is not nil only if the filter values are not valid boolean or are conflicting. 
func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { fieldValues, ok := args.fields[key] diff --git a/vendor/github.com/docker/docker/api/types/image/manifest.go b/vendor/github.com/docker/docker/api/types/image/manifest.go new file mode 100644 index 0000000000..db8a00830e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/manifest.go @@ -0,0 +1,99 @@ +package image + +import ( + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ManifestKind string + +const ( + ManifestKindImage ManifestKind = "image" + ManifestKindAttestation ManifestKind = "attestation" + ManifestKindUnknown ManifestKind = "unknown" +) + +type ManifestSummary struct { + // ID is the content-addressable ID of an image and is the same as the + // digest of the image manifest. + // + // Required: true + ID string `json:"ID"` + + // Descriptor is the OCI descriptor of the image. + // + // Required: true + Descriptor ocispec.Descriptor `json:"Descriptor"` + + // Indicates whether all the child content (image config, layers) is + // fully available locally + // + // Required: true + Available bool `json:"Available"` + + // Size is the size information of the content related to this manifest. + // Note: These sizes only take the locally available content into account. + // + // Required: true + Size struct { + // Content is the size (in bytes) of all the locally present + // content in the content store (e.g. image config, layers) + // referenced by this manifest and its children. + // This only includes blobs in the content store. + Content int64 `json:"Content"` + + // Total is the total size (in bytes) of all the locally present + // data (both distributable and non-distributable) that's related to + // this manifest and its children. + // This equal to the sum of [Content] size AND all the sizes in the + // [Size] struct present in the Kind-specific data struct. + // For example, for an image kind (Kind == ManifestKindImage), + // this would include the size of the image content and unpacked + // image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Total int64 `json:"Total"` + } `json:"Size"` + + // Kind is the kind of the image manifest. + // + // Required: true + Kind ManifestKind `json:"Kind"` + + // Fields below are specific to the kind of the image manifest. + + // Present only if Kind == ManifestKindImage. + ImageData *ImageProperties `json:"ImageData,omitempty"` + + // Present only if Kind == ManifestKindAttestation. + AttestationData *AttestationProperties `json:"AttestationData,omitempty"` +} + +type ImageProperties struct { + // Platform is the OCI platform object describing the platform of the image. + // + // Required: true + Platform ocispec.Platform `json:"Platform"` + + Size struct { + // Unpacked is the size (in bytes) of the locally unpacked + // (uncompressed) image content that's directly usable by the containers + // running this image. + // It's independent of the distributable content - e.g. + // the image might still have an unpacked data that's still used by + // some container even when the distributable/compressed content is + // already gone. + // + // Required: true + Unpacked int64 `json:"Unpacked"` + } + + // Containers is an array containing the IDs of the containers that are + // using this image. + // + // Required: true + Containers []string `json:"Containers"` +} + +type AttestationProperties struct { + // For is the digest of the image manifest that this attestation is for. 
+ For digest.Digest `json:"For"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go index 8e32c9af86..923ebe5a06 100644 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -76,6 +76,9 @@ type ListOptions struct { // ContainerCount indicates whether container count should be computed. ContainerCount bool + + // Manifests indicates whether the image manifests should be returned. + Manifests bool } // RemoveOptions holds parameters to remove images. diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go index f1e3e2ef01..e87e216a28 100644 --- a/vendor/github.com/docker/docker/api/types/image/summary.go +++ b/vendor/github.com/docker/docker/api/types/image/summary.go @@ -1,10 +1,5 @@ package image -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Summary summary -// swagger:model Summary type Summary struct { // Number of containers using this image. Includes both stopped and running @@ -17,7 +12,7 @@ type Summary struct { Containers int64 `json:"Containers"` // Date and time at which the image was created as a Unix timestamp - // (number of seconds sinds EPOCH). + // (number of seconds since EPOCH). // // Required: true Created int64 `json:"Created"` @@ -47,6 +42,14 @@ type Summary struct { // Required: true ParentID string `json:"ParentId"` + // Manifests is a list of image manifests available in this image. It + // provides a more detailed view of the platform-specific image manifests or + // other image-attached data like build attestations. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Manifests []ManifestSummary `json:"Manifests,omitempty"` + // List of content-addressable digests of locally available image manifests // that the image is referenced from. Multiple manifests can refer to the // same image. diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go index 97a924e374..8e383f6e60 100644 --- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go @@ -34,10 +34,9 @@ type AuthConfig struct { } // EncodeAuthConfig serializes the auth configuration as a base64url encoded -// RFC4648, section 5) JSON string for sending through the X-Registry-Auth header. +// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header. // -// For details on base64url encoding, see: -// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 func EncodeAuthConfig(authConfig AuthConfig) (string, error) { buf, err := json.Marshal(authConfig) if err != nil { @@ -46,15 +45,14 @@ func EncodeAuthConfig(authConfig AuthConfig) (string, error) { return base64.URLEncoding.EncodeToString(buf), nil } -// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON +// DecodeAuthConfig decodes base64url encoded ([RFC4648, section 5]) JSON // authentication information as sent through the X-Registry-Auth header. // -// This function always returns an AuthConfig, even if an error occurs. 
It is up +// This function always returns an [AuthConfig], even if an error occurs. It is up // to the caller to decide if authentication is required, and if the error can // be ignored. // -// For details on base64url encoding, see: -// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { if authEncoded == "" { return &AuthConfig{}, nil @@ -69,7 +67,7 @@ func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { // clients and API versions. Current clients and API versions expect authentication // to be provided through the X-Registry-Auth header. // -// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an +// Like [DecodeAuthConfig], this function always returns an [AuthConfig], even if an // error occurs. It is up to the caller to decide if authentication is required, // and if the error can be ignored. func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) { diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 3eae4b9b29..1b4be6fffb 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -122,7 +122,7 @@ type CAConfig struct { SigningCAKey string `json:",omitempty"` // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate ane key. + // then the swarm is forced to generate a new root certificate and key. ForceRotate uint64 `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go index 6791cf3284..c66a2afb8b 100644 --- a/vendor/github.com/docker/docker/api/types/system/info.go +++ b/vendor/github.com/docker/docker/api/types/system/info.go @@ -77,9 +77,6 @@ type Info struct { Containerd *ContainerdInfo `json:",omitempty"` - // Legacy API fields for older API versions. - legacyFields - // Warnings contains a slice of warnings that occurred while collecting // system information. These warnings are intended to be informational // messages for the user, and are not intended to be parsed / used for @@ -124,10 +121,6 @@ type ContainerdNamespaces struct { Plugins string } -type legacyFields struct { - ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions. -} - // PluginsInfo is a temp struct holding Plugins name // registered with docker daemon. 
It is used by [Info] struct type PluginsInfo struct { diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index fe99b74392..ea55813e63 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -484,4 +484,6 @@ type BuildCachePruneOptions struct { All bool KeepStorage int64 Filters filters.Args + + // FIXME(thaJeztah): add new options; see https://github.com/moby/moby/issues/48639 } diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go index bbd9ff0b8f..618a481620 100644 --- a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go +++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go @@ -414,7 +414,7 @@ type Info struct { // the Volume has not been successfully created yet. VolumeID string `json:",omitempty"` - // AccessibleTopolgoy is the topology this volume is actually accessible + // AccessibleTopology is the topology this volume is actually accessible // from. AccessibleTopology []Topology `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder.go b/vendor/github.com/docker/docker/builder/dockerfile/builder.go index be03511a3f..9ad139b1f2 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/builder.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder.go @@ -8,8 +8,8 @@ import ( "sort" "strings" - "github.com/containerd/containerd/platforms" "github.com/containerd/log" + "github.com/containerd/platforms" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" @@ -228,7 +228,7 @@ func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error func processMetaArg(meta instructions.ArgCommand, shlex *shell.Lex, args *BuildArgs) error { // shell.Lex currently only support the concatenated string format - envs := convertMapToEnvList(args.GetAllAllowed()) + envs := shell.EnvsFromSlice(convertMapToEnvList(args.GetAllAllowed())) if err := meta.Expand(func(word string) (string, error) { newword, _, err := shlex.ProcessWord(word, envs) return newword, err diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go index c302eeebd5..fe35dd206a 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -15,7 +15,7 @@ import ( "sort" "strings" - "github.com/containerd/containerd/platforms" + "github.com/containerd/platforms" "github.com/docker/docker/api" "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/builder" @@ -224,7 +224,7 @@ func (d *dispatchRequest) getExpandedString(shlex *shell.Lex, str string) (strin substitutionArgs = append(substitutionArgs, key+"="+value) } - name, _, err := shlex.ProcessWord(str, substitutionArgs) + name, _, err := shlex.ProcessWord(str, shell.EnvsFromSlice(substitutionArgs)) if err != nil { return "", err } @@ -508,7 +508,7 @@ func dispatchEntrypoint(ctx context.Context, d dispatchRequest, c *instructions. // // Expose ports for links and port mappings. This all ends up in // req.runConfig.ExposedPorts for runconfig. 
-func dispatchExpose(ctx context.Context, d dispatchRequest, c *instructions.ExposeCommand, envs []string) error { +func dispatchExpose(ctx context.Context, d dispatchRequest, c *instructions.ExposeCommand, envs shell.EnvGetter) error { // custom multi word expansion // expose $FOO with FOO="80 443" is expanded as EXPOSE [80,443]. This is the only command supporting word to words expansion // so the word processing has been de-generalized diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go index 2bf74ed07f..cc23bf0a52 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -43,7 +43,7 @@ func dispatch(ctx context.Context, d dispatchRequest, cmd instructions.Command) } } runConfigEnv := d.state.runConfig.Env - envs := append(runConfigEnv, d.state.buildArgs.FilterAllowed(runConfigEnv)...) + envs := shell.EnvsFromSlice(append(runConfigEnv, d.state.buildArgs.FilterAllowed(runConfigEnv)...)) if ex, ok := cmd.(instructions.SupportsSingleWordExpansion); ok { err := ex.Expand(func(word string) (string, error) { diff --git a/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go b/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go index e943c22951..97e1146a7a 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go @@ -4,8 +4,8 @@ import ( "context" "runtime" - "github.com/containerd/containerd/platforms" "github.com/containerd/log" + "github.com/containerd/platforms" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" dockerimage "github.com/docker/docker/image" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/vendor/github.com/docker/docker/builder/dockerfile/internals.go index 2439efe37b..fada8a65f2 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -10,8 +10,8 @@ import ( "fmt" "strings" - "github.com/containerd/containerd/platforms" "github.com/containerd/log" + "github.com/containerd/platforms" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/api/types/container" diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go index 4af7376264..694e129f75 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go @@ -27,25 +27,25 @@ func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState, passwdPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "passwd"), ctrRootPath) if err != nil { - return idtools.Identity{}, errors.Wrapf(err, "can't resolve /etc/passwd path in container rootfs") + return idtools.Identity{}, errors.Wrap(err, "can't resolve /etc/passwd path in container rootfs") } groupPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "group"), ctrRootPath) if err != nil { - return idtools.Identity{}, errors.Wrapf(err, "can't resolve /etc/group path in container rootfs") + return idtools.Identity{}, errors.Wrap(err, "can't resolve /etc/group path in container rootfs") } uid, err := lookupUser(userStr, passwdPath) if 
err != nil { - return idtools.Identity{}, errors.Wrapf(err, "can't find uid for user "+userStr) + return idtools.Identity{}, errors.Wrap(err, "can't find uid for user "+userStr) } gid, err := lookupGroup(grpStr, groupPath) if err != nil { - return idtools.Identity{}, errors.Wrapf(err, "can't find gid for group "+grpStr) + return idtools.Identity{}, errors.Wrap(err, "can't find gid for group "+grpStr) } // convert as necessary because of user namespaces chownPair, err := identityMapping.ToHost(idtools.Identity{UID: uid, GID: gid}) if err != nil { - return idtools.Identity{}, errors.Wrapf(err, "unable to convert uid/gid to host mapping") + return idtools.Identity{}, errors.Wrap(err, "unable to convert uid/gid to host mapping") } return chownPair, nil } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go index f79f8e16e1..9be0868312 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go @@ -7,7 +7,7 @@ import ( "path/filepath" "strings" - "github.com/containerd/containerd/platforms" + "github.com/containerd/platforms" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 60d91bc65b..46832d8a44 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -2,7 +2,7 @@ Package client is a Go client for the Docker Engine API. For more information about the Engine API, see the documentation: -https://docs.docker.com/engine/api/ +https://docs.docker.com/reference/api/engine/ # Usage @@ -247,6 +247,14 @@ func (cli *Client) tlsConfig() *tls.Config { func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) { transport := &http.Transport{} + // Necessary to prevent long-lived processes using the + // client from leaking connections due to idle connections + // not being released. + // TODO: see if we can also address this from the server side, + // or in go-connections. + // see: https://github.com/moby/moby/issues/45539 + transport.MaxIdleConns = 6 + transport.IdleConnTimeout = 30 * time.Second err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index a9cc1e21e5..bef679431d 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -11,6 +11,11 @@ import ( ) // ImageList returns a list of images in the docker host. +// +// Experimental: Setting the [options.Manifest] will populate +// [image.Summary.Manifests] with information about image manifests. +// This is experimental and might change in the future without any backward +// compatibility. 
func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) { var images []image.Summary @@ -47,6 +52,9 @@ func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([] if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { query.Set("shared-size", "1") } + if options.Manifests && versions.GreaterThanOrEqualTo(cli.version, "1.47") { + query.Set("manifests", "1") + } serverResp, err := cli.get(ctx, "/images/json", query, nil) defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index bf3e9b1cd6..7c43268b3a 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -56,8 +56,8 @@ func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { err := cli.checkResponseErr(resp) return ping, errdefs.FromStatusCode(err, resp.statusCode) } - ping.APIVersion = resp.header.Get("API-Version") - ping.OSType = resp.header.Get("OSType") + ping.APIVersion = resp.header.Get("Api-Version") + ping.OSType = resp.header.Get("Ostype") if resp.header.Get("Docker-Experimental") == "true" { ping.Experimental = true } diff --git a/vendor/github.com/docker/docker/container/container.go b/vendor/github.com/docker/docker/container/container.go index dfc056a427..d9c5bb736c 100644 --- a/vendor/github.com/docker/docker/container/container.go +++ b/vendor/github.com/docker/docker/container/container.go @@ -30,7 +30,6 @@ import ( "github.com/docker/docker/layer" libcontainerdtypes "github.com/docker/docker/libcontainerd/types" "github.com/docker/docker/oci" - "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/restartmanager" @@ -326,7 +325,7 @@ func (container *Container) SetupWorkingDirectory(rootIdentity idtools.Identity) } // GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path -// sanitisation. Symlinks are all scoped to the BaseFS of the container, as +// sanitization. Symlinks are all scoped to the BaseFS of the container, as // though the container's BaseFS was `/`. // // The BaseFS of a container is the host-facing path which is bind-mounted as @@ -345,7 +344,7 @@ func (container *Container) GetResourcePath(path string) (string, error) { } // IMPORTANT - These are paths on the OS where the daemon is running, hence // any filepath operations must be done in an OS-agnostic way. - r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, containerfs.CleanScopedPath(path)), container.BaseFS) + r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanScopedPath(path)), container.BaseFS) // Log this here on the daemon side as there's otherwise no indication apart // from the error being propagated all the way back to the client. This makes @@ -356,8 +355,20 @@ func (container *Container) GetResourcePath(path string) (string, error) { return r, e } +// cleanScopedPath prepares the given path to be combined with a mount path or +// a drive-letter. On Windows, it removes any existing driveletter (e.g. "C:"). +// The returned path is always prefixed with a [filepath.Separator]. 
+func cleanScopedPath(path string) string { + if len(path) >= 2 { + if v := filepath.VolumeName(path); len(v) > 0 { + path = path[len(v):] + } + } + return filepath.Join(string(filepath.Separator), path) +} + // GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path -// sanitisation. Symlinks are all scoped to the root of the container, as +// sanitization. Symlinks are all scoped to the root of the container, as // though the container's root was `/`. // // The root of a container is the host-facing configuration metadata directory. diff --git a/vendor/github.com/docker/docker/container/stream/streams.go b/vendor/github.com/docker/docker/container/stream/streams.go index 78ec048396..b64e3a3969 100644 --- a/vendor/github.com/docker/docker/container/stream/streams.go +++ b/vendor/github.com/docker/docker/container/stream/streams.go @@ -2,6 +2,7 @@ package stream // import "github.com/docker/docker/container/stream" import ( "context" + "errors" "fmt" "io" "strings" @@ -91,24 +92,24 @@ func (c *Config) NewNopInputPipe() { // CloseStreams ensures that the configured streams are properly closed. func (c *Config) CloseStreams() error { - var errors []string + var errs []string if c.stdin != nil { if err := c.stdin.Close(); err != nil { - errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) + errs = append(errs, fmt.Sprintf("error close stdin: %s", err)) } } if err := c.stdout.Clean(); err != nil { - errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) + errs = append(errs, fmt.Sprintf("error close stdout: %s", err)) } if err := c.stderr.Clean(); err != nil { - errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) + errs = append(errs, fmt.Sprintf("error close stderr: %s", err)) } - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) + if len(errs) > 0 { + return errors.New(strings.Join(errs, "\n")) } return nil diff --git a/vendor/github.com/docker/docker/container/view.go b/vendor/github.com/docker/docker/container/view.go index 0f77aa58ca..1e50a138fd 100644 --- a/vendor/github.com/docker/docker/container/view.go +++ b/vendor/github.com/docker/docker/container/view.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.21 +//go:build go1.22 package container // import "github.com/docker/docker/container" @@ -30,8 +30,12 @@ const ( var ( // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + // + // Deprecated: check for [errdefs.Conflict] errors instead (using [errdefs.IsConflict]. ErrNameReserved = errors.New("name is reserved") // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + // + // Deprecated: check for [errdefs.NotFound] errors instead (using [errdefs.IsNotFound]. ErrNameNotReserved = errors.New("name is not reserved") ) @@ -112,6 +116,7 @@ func NewViewDB() (*ViewDB, error) { // GetByPrefix returns a container with the given ID prefix. It returns an // error if an empty prefix was given or if multiple containers match the prefix. +// It returns an [errdefs.NotFound] if the given s yielded no results. 
func (db *ViewDB) GetByPrefix(s string) (string, error) { if s == "" { return "", errdefs.InvalidParameter(errors.New("prefix can't be empty")) @@ -152,7 +157,7 @@ func (db *ViewDB) withTxn(cb func(*memdb.Txn) error) error { err := cb(txn) if err != nil { txn.Abort() - return errdefs.System(err) + return err } txn.Commit() return nil @@ -183,10 +188,9 @@ func (db *ViewDB) Delete(c *Container) error { }) } -// ReserveName registers a container ID to a name -// ReserveName is idempotent -// Attempting to reserve a container ID to a name that already exists results in an `ErrNameReserved` -// A name reservation is globally unique +// ReserveName registers a container ID to a name. ReserveName is idempotent, +// but returns an [errdefs.Conflict] when attempting to reserve a container ID +// to a name that already is reserved. func (db *ViewDB) ReserveName(name, containerID string) error { return db.withTxn(func(txn *memdb.Txn) error { s, err := txn.First(memdbNamesTable, memdbIDIndex, name) @@ -195,7 +199,7 @@ func (db *ViewDB) ReserveName(name, containerID string) error { } if s != nil { if s.(nameAssociation).containerID != containerID { - return ErrNameReserved + return errdefs.Conflict(ErrNameReserved) //nolint:staticcheck // ignore SA1019: ErrNameReserved is deprecated. } return nil } @@ -235,6 +239,7 @@ func (v *View) All() ([]Snapshot, error) { } // Get returns an item by id. Returned objects must never be modified. +// It returns an [errdefs.NotFound] if the given id was not found. func (v *View) Get(id string) (*Snapshot, error) { s, err := v.txn.First(memdbContainersTable, memdbIDIndex, id) if err != nil { @@ -266,13 +271,14 @@ func (v *View) getNames(containerID string) []string { } // GetID returns the container ID that the passed in name is reserved to. +// It returns an [errdefs.NotFound] if the given id was not found. func (v *View) GetID(name string) (string, error) { s, err := v.txn.First(memdbNamesTable, memdbIDIndex, name) if err != nil { return "", errdefs.System(err) } if s == nil { - return "", ErrNameNotReserved + return "", errdefs.NotFound(ErrNameNotReserved) //nolint:staticcheck // ignore SA1019: ErrNameNotReserved is deprecated. 
} return s.(nameAssociation).containerID, nil } diff --git a/vendor/github.com/docker/docker/daemon/logger/adapter.go b/vendor/github.com/docker/docker/daemon/logger/adapter.go index 95ed5a859e..bd5cf5f226 100644 --- a/vendor/github.com/docker/docker/daemon/logger/adapter.go +++ b/vendor/github.com/docker/docker/daemon/logger/adapter.go @@ -87,7 +87,7 @@ type pluginAdapterWithRead struct { *pluginAdapter } -func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher { +func (a *pluginAdapterWithRead) ReadLogs(ctx context.Context, config ReadConfig) *LogWatcher { watcher := NewLogWatcher() go func() { @@ -101,6 +101,10 @@ func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher { dec := logdriver.NewLogEntryDecoder(stream) for { + if ctx.Err() != nil { + return + } + var buf logdriver.LogEntry if err := dec.Decode(&buf); err != nil { if err == io.EOF { @@ -127,6 +131,8 @@ func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher { // send the message unless the consumer is gone select { case watcher.Msg <- msg: + case <-ctx.Done(): + return case <-watcher.WatchConsumerGone(): return } diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go index bea8ceedb3..6627074fe2 100644 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go @@ -12,10 +12,12 @@ import ( "github.com/docker/docker/pkg/tailfile" ) +var _ logger.LogReader = (*JSONFileLogger)(nil) + // ReadLogs implements the logger's LogReader interface for the logs // created by this driver. -func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { - return l.writer.ReadLogs(config) +func (l *JSONFileLogger) ReadLogs(ctx context.Context, config logger.ReadConfig) *logger.LogWatcher { + return l.writer.ReadLogs(ctx, config) } func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { @@ -79,6 +81,6 @@ func decodeFunc(rdr io.Reader) loggerutils.Decoder { } } -func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) { +func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (loggerutils.SizeReaderAt, int, error) { return tailfile.NewTailReader(ctx, r, req) } diff --git a/vendor/github.com/docker/docker/daemon/logger/local/read.go b/vendor/github.com/docker/docker/daemon/logger/local/read.go index cb5f9f0cd3..14771a52db 100644 --- a/vendor/github.com/docker/docker/daemon/logger/local/read.go +++ b/vendor/github.com/docker/docker/daemon/logger/local/read.go @@ -18,11 +18,11 @@ import ( // logger.defaultBufSize caps the size of Line field. const maxMsgLen int = 1e6 // 1MB. 
-func (d *driver) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { - return d.logfile.ReadLogs(config) +func (d *driver) ReadLogs(ctx context.Context, config logger.ReadConfig) *logger.LogWatcher { + return d.logfile.ReadLogs(ctx, config) } -func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) { +func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (loggerutils.SizeReaderAt, int, error) { size := r.Size() if req < 0 { return nil, 0, errdefs.InvalidParameter(errors.Errorf("invalid number of lines to tail: %d", req)) diff --git a/vendor/github.com/docker/docker/daemon/logger/logger.go b/vendor/github.com/docker/docker/daemon/logger/logger.go index d3e9da1053..868ac8b168 100644 --- a/vendor/github.com/docker/docker/daemon/logger/logger.go +++ b/vendor/github.com/docker/docker/daemon/logger/logger.go @@ -8,6 +8,7 @@ package logger // import "github.com/docker/docker/daemon/logger" import ( + "context" "sync" "time" @@ -88,7 +89,7 @@ type ReadConfig struct { // LogReader is the interface for reading log messages for loggers that support reading. type LogReader interface { // ReadLogs reads logs from underlying logging backend. - ReadLogs(ReadConfig) *LogWatcher + ReadLogs(context.Context, ReadConfig) *LogWatcher } // LogWatcher is used when consuming logs read from the LogReader interface. diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/cache/local_cache.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/cache/local_cache.go index d5adfd4ffa..3466c1e79f 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/cache/local_cache.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/cache/local_cache.go @@ -24,6 +24,8 @@ var builtInCacheLogOpts = map[string]bool{ cacheDisabledKey: true, } +var _ logger.LogReader = (*loggerWithCache)(nil) + // WithLocalCache wraps the passed in logger with a logger caches all writes locally // in addition to writing to the passed in logger. func WithLocalCache(l logger.Logger, info logger.Info) (logger.Logger, error) { @@ -85,8 +87,8 @@ func (l *loggerWithCache) Name() string { return l.l.Name() } -func (l *loggerWithCache) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { - return l.cache.(logger.LogReader).ReadLogs(config) +func (l *loggerWithCache) ReadLogs(ctx context.Context, config logger.ReadConfig) *logger.LogWatcher { + return l.cache.(logger.LogReader).ReadLogs(ctx, config) } func (l *loggerWithCache) Close() error { diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go index 6131bcea7c..7a6b8e50f0 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go @@ -22,8 +22,8 @@ type follow struct { } // Do follows the log file as it is written, starting from f at read. 
-func (fl *follow) Do(f *os.File, read logPos) { - fl.log = log.G(context.TODO()).WithFields(log.Fields{ +func (fl *follow) Do(ctx context.Context, f *os.File, read logPos) { + fl.log = log.G(ctx).WithFields(log.Fields{ "module": "logger", "file": f.Name(), }) @@ -38,7 +38,7 @@ func (fl *follow) Do(f *os.File, read logPos) { }() for { - wrote, ok := fl.nextPos(read) + wrote, ok := fl.nextPos(ctx, read) if !ok { return } @@ -49,7 +49,7 @@ func (fl *follow) Do(f *os.File, read logPos) { fl.Watcher.Err <- err return } - if !fl.forward(f) { + if !fl.forward(ctx, f) { return } @@ -91,7 +91,7 @@ func (fl *follow) Do(f *os.File, read logPos) { read.size = 0 } - if !fl.forward(io.NewSectionReader(f, read.size, wrote.size-read.size)) { + if !fl.forward(ctx, io.NewSectionReader(f, read.size, wrote.size-read.size)) { return } read = wrote @@ -100,9 +100,11 @@ func (fl *follow) Do(f *os.File, read logPos) { // nextPos waits until the write position of the LogFile being followed has // advanced from current and returns the new position. -func (fl *follow) nextPos(current logPos) (next logPos, ok bool) { +func (fl *follow) nextPos(ctx context.Context, current logPos) (next logPos, ok bool) { var st logReadState select { + case <-ctx.Done(): + return current, false case <-fl.Watcher.WatchConsumerGone(): return current, false case st = <-fl.LogFile.read: @@ -135,7 +137,7 @@ func (fl *follow) nextPos(current logPos) (next logPos, ok bool) { // forward decodes log messages from r and forwards them to the log watcher. // // The return value, cont, signals whether following should continue. -func (fl *follow) forward(r io.Reader) (cont bool) { +func (fl *follow) forward(ctx context.Context, r io.Reader) (cont bool) { fl.Decoder.Reset(r) - return fl.Forwarder.Do(fl.Watcher, fl.Decoder) + return fl.Forwarder.Do(ctx, fl.Watcher, fl.Decoder.Decode) } diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go index 61490c8d1a..4f2ad8bbba 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go @@ -1,3 +1,6 @@ +// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: +//go:build go1.22 + package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils" import ( @@ -9,14 +12,18 @@ import ( "io/fs" "math" "os" + "slices" "strconv" "sync" "time" + "github.com/containerd/containerd/tracing" "github.com/containerd/log" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/pkg/pools" "github.com/pkg/errors" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" ) // rotateFileMetadata is a metadata of the gzip header of the compressed log file @@ -93,7 +100,7 @@ type Decoder interface { // Reset resets the decoder // Reset is called for certain events, such as log rotations Reset(io.Reader) - // Decode decodes the next log messeage from the stream + // Decode decodes the next log message from the stream Decode() (*logger.Message, error) // Close signals to the decoder that it can release whatever resources it was using. Close() @@ -107,16 +114,11 @@ type SizeReaderAt interface { Size() int64 } -type readAtCloser interface { - io.ReaderAt - io.Closer -} - // GetTailReaderFunc is used to truncate a reader to only read as much as is required // in order to get the passed in number of log lines. 
// It returns the sectioned reader, the number of lines that the section reader // contains, and any error that occurs. -type GetTailReaderFunc func(ctx context.Context, f SizeReaderAt, nLogLines int) (rdr io.Reader, nLines int, err error) +type GetTailReaderFunc func(ctx context.Context, f SizeReaderAt, nLogLines int) (rdr SizeReaderAt, nLines int, err error) // NewLogFile creates new LogFile func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, decodeFunc MakeDecoderFn, perms os.FileMode, getTailReader GetTailReaderFunc) (*LogFile, error) { @@ -377,7 +379,12 @@ func (w *LogFile) Close() error { // ReadLogs decodes entries from log files. // // It is the caller's responsibility to call ConsumerGone on the LogWatcher. -func (w *LogFile) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { +func (w *LogFile) ReadLogs(ctx context.Context, config logger.ReadConfig) *logger.LogWatcher { + ctx, span := tracing.StartSpan(ctx, "logger.LogFile.ReadLogs") + defer span.End() + + span.SetAttributes(tracing.Attribute("config", config)) + watcher := logger.NewLogWatcher() // Lock out filesystem operations so that we can capture the read // position and atomically open the corresponding log file, without the @@ -389,19 +396,104 @@ func (w *LogFile) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { st := <-w.read pos := st.pos w.read <- st - go w.readLogsLocked(pos, config, watcher) + go w.readLogsLocked(ctx, pos, config, watcher) return watcher } +// tailFiles must be called with w.fsopMu locked for reads. +// w.fsopMu.RUnlock() is called before returning. +func (w *LogFile) tailFiles(ctx context.Context, config logger.ReadConfig, watcher *logger.LogWatcher, current SizeReaderAt, dec Decoder, fwd *forwarder) (cont bool) { + if config.Tail == 0 { + w.fsopMu.RUnlock() + return true + } + + ctx, span := tracing.StartSpan(ctx, "logger.Logfile.TailLogs") + defer func() { + span.SetAttributes(attribute.Bool("continue", cont)) + span.End() + }() + + files, err := w.openRotatedFiles(ctx, config) + w.fsopMu.RUnlock() + + if err != nil { + // TODO: Should we allow this to continue (as in set `cont=true`) and not error out the log stream? + err = errors.Wrap(err, "error opening rotated log files") + span.SetStatus(err) + watcher.Err <- err + return false + } + + if current.Size() > 0 { + files = append(files, &sizeReaderAtOpener{current, "current"}) + } + + return tailFiles(ctx, files, watcher, dec, w.getTailReader, config.Tail, fwd) +} + +type sizeReaderAtOpener struct { + SizeReaderAt + ref string +} + +func (o *sizeReaderAtOpener) ReaderAt(context.Context) (sizeReaderAtCloser, error) { + return &sizeReaderAtWithCloser{o, nil}, nil +} + +func (o *sizeReaderAtOpener) Close() {} + +func (o *sizeReaderAtOpener) Ref() string { + return o.ref +} + +type sizeReaderAtWithCloser struct { + SizeReaderAt + close func() error +} + +func (r *sizeReaderAtWithCloser) ReadAt(p []byte, offset int64) (int, error) { + if r.SizeReaderAt == nil { + return 0, io.EOF + } + return r.SizeReaderAt.ReadAt(p, offset) +} + +func (r *sizeReaderAtWithCloser) Read(p []byte) (int, error) { + if r.SizeReaderAt == nil { + return 0, io.EOF + } + return r.SizeReaderAt.Read(p) +} + +func (r *sizeReaderAtWithCloser) Size() int64 { + if r.SizeReaderAt == nil { + return 0 + } + return r.SizeReaderAt.Size() +} + +func (r *sizeReaderAtWithCloser) Close() error { + if r.close != nil { + return r.close() + } + return nil +} + // readLogsLocked is the bulk of the implementation of ReadLogs. 
// // w.fsopMu must be locked for reading when calling this method. // w.fsopMu.RUnlock() is called before returning. -func (w *LogFile) readLogsLocked(currentPos logPos, config logger.ReadConfig, watcher *logger.LogWatcher) { +func (w *LogFile) readLogsLocked(ctx context.Context, currentPos logPos, config logger.ReadConfig, watcher *logger.LogWatcher) { + ctx, span := tracing.StartSpan(ctx, "logger.Logfile.ReadLogsLocked") + defer span.End() + defer close(watcher.Msg) currentFile, err := open(w.f.Name()) if err != nil { + w.fsopMu.RUnlock() + span.SetStatus(err) watcher.Err <- err return } @@ -410,53 +502,13 @@ func (w *LogFile) readLogsLocked(currentPos logPos, config logger.ReadConfig, wa dec := w.createDecoder(nil) defer dec.Close() - currentChunk := io.NewSectionReader(currentFile, 0, currentPos.size) fwd := newForwarder(config) - if config.Tail != 0 { - // TODO(@cpuguy83): Instead of opening every file, only get the files which - // are needed to tail. - // This is especially costly when compression is enabled. - files, err := w.openRotatedFiles(config) - if err != nil { - watcher.Err <- err - return - } - - closeFiles := func() { - for _, f := range files { - f.Close() - } - } - - readers := make([]SizeReaderAt, 0, len(files)+1) - for _, f := range files { - switch ff := f.(type) { - case SizeReaderAt: - readers = append(readers, ff) - case interface{ Stat() (fs.FileInfo, error) }: - stat, err := ff.Stat() - if err != nil { - watcher.Err <- errors.Wrap(err, "error reading size of rotated file") - closeFiles() - return - } - readers = append(readers, io.NewSectionReader(f, 0, stat.Size())) - default: - panic(fmt.Errorf("rotated file value %#v (%[1]T) has neither Size() nor Stat() methods", f)) - } - } - if currentChunk.Size() > 0 { - readers = append(readers, currentChunk) - } + // At this point, w.tailFiles is responsible for unlocking w.fsopmu + ok := w.tailFiles(ctx, config, watcher, io.NewSectionReader(currentFile, 0, currentPos.size), dec, fwd) - ok := tailFiles(readers, watcher, dec, w.getTailReader, config.Tail, fwd) - closeFiles() - if !ok { - return - } - } else { - w.fsopMu.RUnlock() + if !ok { + return } if !config.Follow { @@ -468,117 +520,265 @@ func (w *LogFile) readLogsLocked(currentPos logPos, config logger.ReadConfig, wa Watcher: watcher, Decoder: dec, Forwarder: fwd, - }).Do(currentFile, currentPos) + }).Do(ctx, currentFile, currentPos) +} + +type fileOpener interface { + ReaderAt(context.Context) (ra sizeReaderAtCloser, err error) + Close() + Ref() string +} + +// simpleFileOpener just holds a reference to an already open file +type simpleFileOpener struct { + f *os.File + sz int64 + closed bool +} + +func (o *simpleFileOpener) ReaderAt(context.Context) (sizeReaderAtCloser, error) { + if o.closed { + return nil, errors.New("file is closed") + } + + if o.sz == 0 { + stat, err := o.f.Stat() + if err != nil { + return nil, errors.Wrap(err, "error stating file") + } + o.sz = stat.Size() + } + return &sizeReaderAtWithCloser{io.NewSectionReader(o.f, 0, o.sz), nil}, nil +} + +func (o *simpleFileOpener) Ref() string { + return o.f.Name() +} + +func (o *simpleFileOpener) Close() { + _ = o.f.Close() + o.closed = true +} + +// converter function used by shareTempFileConverter +func decompress(dst io.WriteSeeker, src io.ReadSeeker) error { + if _, err := src.Seek(0, io.SeekStart); err != nil { + return err + } + rc, err := gzip.NewReader(src) + if err != nil { + return err + } + _, err = pools.Copy(dst, rc) + if err != nil { + return err + } + return rc.Close() +} + +// 
compressedFileOpener holds a reference to compressed a log file and will +// lazily open a decompressed version of the file. +type compressedFileOpener struct { + closed bool + + f *os.File + + lf *LogFile + ifBefore time.Time +} + +func (cfo *compressedFileOpener) ReaderAt(ctx context.Context) (_ sizeReaderAtCloser, retErr error) { + _, span := tracing.StartSpan(ctx, "logger.Logfile.Compressed.ReaderAt") + defer func() { + if retErr != nil { + span.SetStatus(retErr) + } + span.End() + }() + + span.SetAttributes(attribute.String("file", cfo.f.Name())) + + if cfo.closed { + return nil, errors.New("compressed file closed") + } + + gzr, err := gzip.NewReader(cfo.f) + if err != nil { + return nil, err + } + defer gzr.Close() + + // Extract the last log entry timestamp from the gzip header + // Use this to determine if we even need to read this file based on inputs + extra := &rotateFileMetadata{} + err = json.Unmarshal(gzr.Header.Extra, extra) + if err == nil && !extra.LastTime.IsZero() && extra.LastTime.Before(cfo.ifBefore) { + span.SetAttributes(attribute.Bool("skip", true)) + return &sizeReaderAtWithCloser{}, nil + } + if err == nil { + span.SetAttributes(attribute.Stringer("lastLogTime", extra.LastTime)) + } + + span.AddEvent("Start decompress") + return cfo.lf.decompress.Do(cfo.f) +} + +func (cfo *compressedFileOpener) Close() { + cfo.closed = true + cfo.f.Close() +} + +func (cfo *compressedFileOpener) Ref() string { + return cfo.f.Name() +} + +type emptyFileOpener struct{} + +func (emptyFileOpener) ReaderAt(context.Context) (sizeReaderAtCloser, error) { + return &sizeReaderAtWithCloser{}, nil +} + +func (emptyFileOpener) Close() {} + +func (emptyFileOpener) Ref() string { + return "null" } // openRotatedFiles returns a slice of files open for reading, in order from // oldest to newest, and calls w.fsopMu.RUnlock() before returning. // // This method must only be called with w.fsopMu locked for reading. 
-func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files []readAtCloser, err error) { - type rotatedFile struct { - f *os.File - compressed bool - } +func (w *LogFile) openRotatedFiles(ctx context.Context, config logger.ReadConfig) (_ []fileOpener, retErr error) { + var out []fileOpener - var q []rotatedFile defer func() { - if err != nil { - for _, qq := range q { - qq.f.Close() - } - for _, f := range files { - f.Close() + if retErr != nil { + for _, fo := range out { + fo.Close() } } }() - q, err = func() (q []rotatedFile, err error) { - defer w.fsopMu.RUnlock() + for i := w.maxFiles; i > 1; i-- { + fo, err := w.openRotatedFile(ctx, i-1, config) + if err != nil { + return nil, err + } + out = append(out, fo) + } - q = make([]rotatedFile, 0, w.maxFiles) - for i := w.maxFiles; i > 1; i-- { - var f rotatedFile - f.f, err = open(fmt.Sprintf("%s.%d", w.f.Name(), i-1)) - if err != nil { - if !errors.Is(err, fs.ErrNotExist) { - return nil, errors.Wrap(err, "error opening rotated log file") - } - f.compressed = true - f.f, err = open(fmt.Sprintf("%s.%d.gz", w.f.Name(), i-1)) - if err != nil { - if !errors.Is(err, fs.ErrNotExist) { - return nil, errors.Wrap(err, "error opening file for decompression") - } - continue + return out, nil +} + +func (w *LogFile) openRotatedFile(ctx context.Context, i int, config logger.ReadConfig) (fileOpener, error) { + f, err := open(fmt.Sprintf("%s.%d", w.f.Name(), i)) + if err == nil { + return &simpleFileOpener{ + f: f, + }, nil + } + + if !errors.Is(err, fs.ErrNotExist) { + return nil, errors.Wrap(err, "error opening rotated log file") + } + + f, err = open(fmt.Sprintf("%s.%d.gz", w.f.Name(), i)) + if err != nil { + if !errors.Is(err, fs.ErrNotExist) { + return nil, errors.Wrap(err, "error opening file for decompression") + } + return &emptyFileOpener{}, nil + } + + return &compressedFileOpener{ + f: f, + lf: w, + ifBefore: config.Since, + }, nil +} + +// This is used to improve type safety around tailing logs +// Some log readers require the log file to be closed, so this makes sure all +// implementers have a closer even if it may be a no-op. +// This is opposed to asserting a type. 
+type sizeReaderAtCloser interface { + SizeReaderAt + io.Closer +} + +func getTailFiles(ctx context.Context, files []fileOpener, nLines int, getTailReader GetTailReaderFunc) (_ []sizeReaderAtCloser, retErr error) { + ctx, span := tracing.StartSpan(ctx, "logger.Logfile.CollectTailFiles") + span.SetAttributes(attribute.Int("requested_lines", nLines)) + + defer func() { + if retErr != nil { + span.SetStatus(retErr) + } + span.End() + }() + out := make([]sizeReaderAtCloser, 0, len(files)) + + defer func() { + if retErr != nil { + for _, ra := range out { + if err := ra.Close(); err != nil { + log.G(ctx).WithError(err).Warn("Error closing log reader") } } - q = append(q, f) } - return q, nil }() - if err != nil { - return nil, err - } - for len(q) > 0 { - qq := q[0] - q = q[1:] - if qq.compressed { - defer qq.f.Close() - f, err := w.maybeDecompressFile(qq.f, config) + if nLines <= 0 { + for _, fo := range files { + span.AddEvent("Open file", trace.WithAttributes(attribute.String("file", fo.Ref()))) + + ra, err := fo.ReaderAt(ctx) if err != nil { return nil, err } - if f != nil { - // The log before `config.Since` does not need to read - files = append(files, f) - } - } else { - files = append(files, qq.f) + out = append(out, ra) + } + return out, nil } - return files, nil -} -func (w *LogFile) maybeDecompressFile(cf *os.File, config logger.ReadConfig) (readAtCloser, error) { - rc, err := gzip.NewReader(cf) - if err != nil { - return nil, errors.Wrap(err, "error making gzip reader for compressed log file") - } - defer rc.Close() + for i := len(files) - 1; i >= 0 && nLines > 0; i-- { + if err := ctx.Err(); err != nil { + return nil, errors.Wrap(err, "stopping parsing files to tail due to error") + } - // Extract the last log entry timestramp from the gzip header - extra := &rotateFileMetadata{} - err = json.Unmarshal(rc.Header.Extra, extra) - if err == nil && !extra.LastTime.IsZero() && extra.LastTime.Before(config.Since) { - return nil, nil - } - tmpf, err := w.decompress.Do(cf) - return tmpf, errors.Wrap(err, "error decompressing log file") -} + fo := files[i] -func decompress(dst io.WriteSeeker, src io.ReadSeeker) error { - if _, err := src.Seek(0, io.SeekStart); err != nil { - return err - } - rc, err := gzip.NewReader(src) - if err != nil { - return err - } - _, err = pools.Copy(dst, rc) - if err != nil { - return err + fileAttr := attribute.String("file", fo.Ref()) + span.AddEvent("Open file", trace.WithAttributes(fileAttr)) + + ra, err := fo.ReaderAt(ctx) + if err != nil { + return nil, err + } + + span.AddEvent("Scan file to tail", trace.WithAttributes(fileAttr, attribute.Int("remaining_lines", nLines))) + + tail, n, err := getTailReader(ctx, ra, nLines) + if err != nil { + ra.Close() + log.G(ctx).WithError(err).Warn("Error scanning log file for tail file request, skipping") + continue + } + nLines -= n + out = append(out, &sizeReaderAtWithCloser{tail, ra.Close}) } - return rc.Close() + + slices.Reverse(out) + + return out, nil } -func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, getTailReader GetTailReaderFunc, nLines int, fwd *forwarder) (cont bool) { - ctx, cancel := context.WithCancel(context.Background()) +func tailFiles(ctx context.Context, files []fileOpener, watcher *logger.LogWatcher, dec Decoder, getTailReader GetTailReaderFunc, nLines int, fwd *forwarder) (cont bool) { + ctx, cancel := context.WithCancel(ctx) defer cancel() - cont = true - // TODO(@cpuguy83): we should plumb a context through instead of dealing with `WatchClose()` here. 
go func() { select { case <-ctx.Done(): @@ -587,27 +787,64 @@ func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, ge } }() - readers := make([]io.Reader, 0, len(files)) + readers, err := getTailFiles(ctx, files, nLines, getTailReader) + if err != nil { + watcher.Err <- err + return false + } - if nLines > 0 { - for i := len(files) - 1; i >= 0 && nLines > 0; i-- { - tail, n, err := getTailReader(ctx, files[i], nLines) - if err != nil { - watcher.Err <- errors.Wrap(err, "error finding file position to start log tailing") - return false + var idx int + defer func() { + // Make sure all are released if there is an early return. + if !cont { + for _, r := range readers[idx:] { + if err := r.Close(); err != nil { + log.G(ctx).WithError(err).Debug("Error closing log reader") + } } - nLines -= n - readers = append([]io.Reader{tail}, readers...) } - } else { - for _, r := range files { - readers = append(readers, r) + }() + + for _, ra := range readers { + ra := ra + select { + case <-watcher.WatchConsumerGone(): + return false + case <-ctx.Done(): + return false + default: + } + + dec.Reset(ra) + + cancel := context.AfterFunc(ctx, func() { + if err := ra.Close(); err != nil { + log.G(ctx).WithError(err).Debug("Error closing log reader") + } + }) + + ok := fwd.Do(ctx, watcher, func() (*logger.Message, error) { + msg, err := dec.Decode() + if err != nil && !errors.Is(err, io.EOF) { + // We have an error decoding the stream, but we don't want to error out + // the whole log reader. + // If we return anything other than EOF then the forwarder will return + // false and we'll exit the loop. + // Instead just log the error here and return an EOF so we can move to + // the next file. + log.G(ctx).WithError(err).Warn("Error decoding log file") + return nil, io.EOF + } + return msg, err + }) + cancel() + idx++ + if !ok { + return false } } - rdr := io.MultiReader(readers...) - dec.Reset(rdr) - return fwd.Do(watcher, dec) + return true } type forwarder struct { @@ -622,16 +859,35 @@ func newForwarder(config logger.ReadConfig) *forwarder { // conditions to watcher. Do returns cont=true iff it has read all messages from // dec without encountering a message with a timestamp which is after the // configured until time. 
-func (fwd *forwarder) Do(watcher *logger.LogWatcher, dec Decoder) (cont bool) { +func (fwd *forwarder) Do(ctx context.Context, watcher *logger.LogWatcher, next func() (*logger.Message, error)) (cont bool) { + ctx, span := tracing.StartSpan(ctx, "logger.Logfile.Forward") + defer func() { + span.SetAttributes(attribute.Bool("continue", cont)) + span.End() + }() + for { - msg, err := dec.Decode() + select { + case <-watcher.WatchConsumerGone(): + span.AddEvent("watch consumer gone") + return false + case <-ctx.Done(): + span.AddEvent(ctx.Err().Error()) + return false + default: + } + + msg, err := next() if err != nil { if errors.Is(err, io.EOF) { + span.AddEvent("EOF") return true } - watcher.Err <- err + span.SetStatus(err) + log.G(ctx).WithError(err).Debug("Error while decoding log entry, not continuing") return false } + if !fwd.since.IsZero() { if msg.Timestamp.Before(fwd.since) { continue @@ -643,10 +899,16 @@ func (fwd *forwarder) Do(watcher *logger.LogWatcher, dec Decoder) (cont bool) { fwd.since = time.Time{} } if !fwd.until.IsZero() && msg.Timestamp.After(fwd.until) { + log.G(ctx).Debug("Log is newer than requested window, skipping remaining logs") return false } + select { + case <-ctx.Done(): + span.AddEvent(ctx.Err().Error()) + return false case <-watcher.WatchConsumerGone(): + span.AddEvent("watch consumer gone") return false case watcher.Msg <- msg: } diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/sharedtemp.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/sharedtemp.go index c3493caabc..f5c8c1f035 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/sharedtemp.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/sharedtemp.go @@ -76,7 +76,7 @@ func (c *sharedTempFileConverter) Do(f *os.File) (*sharedFileReader, error) { // ModTime, which conveniently also handles the case of true // positives where the file has also been modified since it was // first converted. - if os.SameFile(tf.src, stat) && tf.src.ModTime() == stat.ModTime() { + if os.SameFile(tf.src, stat) && tf.src.ModTime().Equal(stat.ModTime()) { return c.openExisting(st, id, tf) } } diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go index 7a8c6aebd6..1951ca88bd 100644 --- a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go +++ b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go @@ -12,7 +12,7 @@ import ( ) func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { - // Make sure to also open with read (in addition to write) to avoid borken pipe errors on plugin failure. + // Make sure to also open with read (in addition to write) to avoid broken pipe errors on plugin failure. // It is up to the plugin to keep track of pipes that it should re-attach to, however. // If the plugin doesn't open for reads, then the container will block once the pipe is full. 
f, err := fifo.OpenFifo(context.Background(), a.fifoPath, unix.O_RDWR|unix.O_CREAT|unix.O_NONBLOCK, 0o700) diff --git a/vendor/github.com/docker/docker/daemon/logger/ring.go b/vendor/github.com/docker/docker/daemon/logger/ring.go index 8c19b543d6..1c1e9a64a2 100644 --- a/vendor/github.com/docker/docker/daemon/logger/ring.go +++ b/vendor/github.com/docker/docker/daemon/logger/ring.go @@ -1,6 +1,7 @@ package logger // import "github.com/docker/docker/daemon/logger" import ( + "context" "errors" "sync" "sync/atomic" @@ -20,19 +21,22 @@ type RingLogger struct { wg sync.WaitGroup } -var _ SizedLogger = &RingLogger{} +var ( + _ SizedLogger = (*RingLogger)(nil) + _ LogReader = (*ringWithReader)(nil) +) type ringWithReader struct { *RingLogger } -func (r *ringWithReader) ReadLogs(cfg ReadConfig) *LogWatcher { +func (r *ringWithReader) ReadLogs(ctx context.Context, cfg ReadConfig) *LogWatcher { reader, ok := r.l.(LogReader) if !ok { // something is wrong if we get here panic("expected log reader") } - return reader.ReadLogs(cfg) + return reader.ReadLogs(ctx, cfg) } func newRingLogger(driver Logger, logInfo Info, maxSize int64) *RingLogger { diff --git a/vendor/github.com/docker/docker/internal/sliceutil/sliceutil.go b/vendor/github.com/docker/docker/internal/sliceutil/sliceutil.go index 5d5856ecc5..48e5f723f2 100644 --- a/vendor/github.com/docker/docker/internal/sliceutil/sliceutil.go +++ b/vendor/github.com/docker/docker/internal/sliceutil/sliceutil.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.21 +//go:build go1.22 package sliceutil diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go index 96ede8711b..7870f6ee33 100644 --- a/vendor/github.com/docker/docker/layer/filestore.go +++ b/vendor/github.com/docker/docker/layer/filestore.go @@ -7,7 +7,6 @@ import ( "io" "os" "path/filepath" - "regexp" "strconv" "strings" @@ -18,14 +17,11 @@ import ( "github.com/pkg/errors" ) -var ( - stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) - supportedAlgorithms = []digest.Algorithm{ - digest.SHA256, - // digest.SHA384, // Currently not used - // digest.SHA512, // Currently not used - } -) +var supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used +} type fileMetadataStore struct { root string @@ -262,7 +258,7 @@ func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { } content := strings.TrimSpace(string(contentBytes)) - if !stringIDRegexp.MatchString(content) { + if !isValidID(content) { return "", errors.New("invalid mount id value") } @@ -279,7 +275,7 @@ func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { } content := strings.TrimSpace(string(contentBytes)) - if !stringIDRegexp.MatchString(content) { + if !isValidID(content) { return "", errors.New("invalid init id value") } @@ -431,3 +427,18 @@ func (fms *fileMetadataStore) Remove(layer ChainID, cache string) error { func (fms *fileMetadataStore) RemoveMount(mount string) error { return os.RemoveAll(fms.getMountDirectory(mount)) } + +// isValidID checks if mount/init id is valid. It is similar to +// regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`).MatchString(id). 
+func isValidID(id string) bool { + id = strings.TrimSuffix(id, "-init") + if len(id) != 64 { + return false + } + for _, c := range id { + if (c < '0' || c > '9') && (c < 'a' || c > 'f') { + return false + } + } + return true +} diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go index 28ad0fc9c7..3f2d3adcaa 100644 --- a/vendor/github.com/docker/docker/layer/layer.go +++ b/vendor/github.com/docker/docker/layer/layer.go @@ -199,11 +199,11 @@ func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { return parent } if parent == "" { - return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) + return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) // #nosec G602 -- slice index out of range, which is a false positive } // H = "H(n-1) SHA256(n)" - dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) - return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) // #nosec G602 -- slice index out of range, which is a false positive + return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) // #nosec G602 -- slice index out of range, which is a false positive } // ReleaseAndLog releases the provided layer from the given layer diff --git a/vendor/github.com/docker/docker/oci/caps/utils.go b/vendor/github.com/docker/docker/oci/caps/utils.go index c61f6b49e9..1cdcf5b7b3 100644 --- a/vendor/github.com/docker/docker/oci/caps/utils.go +++ b/vendor/github.com/docker/docker/oci/caps/utils.go @@ -21,7 +21,7 @@ var ( knownCaps map[string]*struct{} ) -// GetAllCapabilities returns all capabilities that are availeble in the current +// GetAllCapabilities returns all capabilities that are available in the current // environment. func GetAllCapabilities() []string { initCaps() diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 45ac2aa6ce..b9d2a538ab 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -6,8 +6,8 @@ import ( "path/filepath" "strings" - "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/system" + "github.com/moby/sys/userns" "github.com/pkg/errors" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go index 3b6d8a77aa..07739462e0 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -36,7 +36,7 @@ func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error // This should be used to prevent a potential attacker from manipulating `dest` // such that it would provide access to files outside of `dest` through things // like symlinks. 
Normally `ResolveSymlinksInScope` would handle this, however -// sanitizing symlinks in this manner is inherrently racey: +// sanitizing symlinks in this manner is inherently racey: // ref: CVE-2018-15664 func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error { return untarHandler(tarArchive, dest, options, true, root) diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go index d9f26074a5..e12ba86aca 100644 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -6,8 +6,8 @@ import ( "io" "path/filepath" - "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/archive" + "github.com/moby/sys/userns" ) // applyLayerHandler parses a diff in the standard layer format from `layer`, and diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go deleted file mode 100644 index 3b7fd80f28..0000000000 --- a/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go +++ /dev/null @@ -1,15 +0,0 @@ -package containerfs // import "github.com/docker/docker/pkg/containerfs" - -import "path/filepath" - -// CleanScopedPath prepares the given path to be combined with a mount path or -// a drive-letter. On Windows, it removes any existing driveletter (e.g. "C:"). -// The returned path is always prefixed with a [filepath.Separator]. -func CleanScopedPath(path string) string { - if len(path) >= 2 { - if v := filepath.VolumeName(path); len(v) > 0 { - path = path[len(v):] - } - } - return filepath.Join(string(filepath.Separator), path) -} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/rm.go b/vendor/github.com/docker/docker/pkg/containerfs/rm.go deleted file mode 100644 index 303714a180..0000000000 --- a/vendor/github.com/docker/docker/pkg/containerfs/rm.go +++ /dev/null @@ -1,78 +0,0 @@ -//go:build !darwin && !windows - -package containerfs // import "github.com/docker/docker/pkg/containerfs" - -import ( - "os" - "syscall" - "time" - - "github.com/moby/sys/mount" - "github.com/pkg/errors" -) - -// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can -// often be remedied. -// Only use `EnsureRemoveAll` if you really want to make every effort to remove -// a directory. -// -// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there -// can be a race between reading directory entries and then actually attempting -// to remove everything in the directory. -// These types of errors do not need to be returned since it's ok for the dir to -// be gone we can just retry the remove operation. -// -// This should not return a `os.ErrNotExist` kind of error under any circumstances -func EnsureRemoveAll(dir string) error { - notExistErr := make(map[string]bool) - - // track retries - exitOnErr := make(map[string]int) - maxRetry := 50 - - // Attempt to unmount anything beneath this dir first - mount.RecursiveUnmount(dir) - - for { - err := os.RemoveAll(dir) - if err == nil { - return nil - } - - pe, ok := err.(*os.PathError) - if !ok { - return err - } - - if os.IsNotExist(err) { - if notExistErr[pe.Path] { - return err - } - notExistErr[pe.Path] = true - - // There is a race where some subdir can be removed but after the parent - // dir entries have been read. 
- // So the path could be from `os.Remove(subdir)` - // If the reported non-existent path is not the passed in `dir` we - // should just retry, but otherwise return with no error. - if pe.Path == dir { - return nil - } - continue - } - - if pe.Err != syscall.EBUSY { - return err - } - - if e := mount.Unmount(pe.Path); e != nil { - return errors.Wrapf(e, "error while removing %s", dir) - } - - if exitOnErr[pe.Path] == maxRetry { - return err - } - exitOnErr[pe.Path]++ - time.Sleep(100 * time.Millisecond) - } -} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/rm_windows.go b/vendor/github.com/docker/docker/pkg/containerfs/rm_windows.go deleted file mode 100644 index 779979ed3d..0000000000 --- a/vendor/github.com/docker/docker/pkg/containerfs/rm_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package containerfs // import "github.com/docker/docker/pkg/containerfs" - -import "os" - -// EnsureRemoveAll is an alias to os.RemoveAll on Windows -var EnsureRemoveAll = os.RemoveAll diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go index 035160c834..8d2c8857fb 100644 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -290,7 +290,7 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, } // Stream is an io.Writer for output with utilities to get the output's file -// descriptor and to detect wether it's a terminal. +// descriptor and to detect whether it's a terminal. // // it is subset of the streams.Out type in // https://pkg.go.dev/github.com/docker/cli@v20.10.17+incompatible/cli/streams#Out diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go index 503ac574a9..baa39ccccf 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -10,8 +10,8 @@ import ( "strings" "sync" - "github.com/containerd/containerd/pkg/userns" "github.com/containerd/log" + "github.com/moby/sys/userns" "github.com/pkg/errors" ) diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go index 2efd8508bf..96c8e2b7fd 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins.go +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -236,7 +236,6 @@ func loadWithRetry(name string, retry bool) (*Plugin, error) { storage.Unlock() err = pl.activate() - if err != nil { storage.Lock() delete(storage.plugins, name) diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go index 3792c67a9e..3ea3012b18 100644 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -124,7 +124,7 @@ func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { } // NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.Writecloser. +// into the pool and closes the writer if it's an io.WriteCloser. 
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { return ioutils.NewWriteCloserWrapper(w, func() error { buf.Flush() diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go index 5e29a6b3b8..97f355d2e4 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -10,7 +10,9 @@ import ( // Lstat takes a path to a file and returns // a system.StatT type pertaining to that file. // -// Throws an error if the file does not exist +// Throws an error if the file does not exist. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Lstat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Lstat(path, s); err != nil { diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go index 359c791d9b..4180f3ac20 100644 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -4,6 +4,8 @@ import "os" // Lstat calls os.Lstat to get a fileinfo interface back. // This is then copied into our own locally defined structure. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Lstat(path string) (*StatT, error) { fi, err := os.Lstat(path) if err != nil { diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go index 2a62237a45..e0cd22d7a7 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -11,6 +11,8 @@ import ( // Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, // then the top 12 bits of the minor. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Mkdev(major int64, minor int64) uint32 { return uint32(unix.Mkdev(uint32(major), uint32(minor))) } diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go index e218e742d4..4f66453d62 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_freebsd.go @@ -8,6 +8,8 @@ import ( // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and dev. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go index 34df0b9236..34c5532631 100644 --- a/vendor/github.com/docker/docker/pkg/system/mknod_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/mknod_unix.go @@ -8,6 +8,8 @@ import ( // Mknod creates a filesystem node (file, device special file or named pipe) named path // with attributes specified by mode and dev. +// +// Deprecated: this function is only used internally, and will be removed in the next release. 
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go index 4309d42b9f..0557235f98 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -17,6 +17,8 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) { // FromStatT converts a syscall.Stat_t type to a system.Stat_t type // This is exposed on Linux as pkg/archive/changes uses it. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func FromStatT(s *syscall.Stat_t) (*StatT, error) { return fromStatT(s) } diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go index 205e54677d..661b0bed20 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -9,6 +9,8 @@ import ( // StatT type contains status of a file. It contains metadata // like permission, owner, group, size, etc about a file. +// +// Deprecated: this type is only used internally, and will be removed in the next release. type StatT struct { mode uint32 uid uint32 @@ -56,7 +58,9 @@ func (s StatT) IsDir() bool { // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // -// Throws an error if the file does not exist +// Throws an error if the file does not exist. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Stat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Stat(path, s); err != nil { diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go index 10876cd73e..e74a0f4fd7 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -7,6 +7,8 @@ import ( // StatT type contains status of a file. It contains metadata // like permission, size, etc about a file. +// +// Deprecated: this type is only used internally, and will be removed in the next release. type StatT struct { mode os.FileMode size int64 @@ -31,7 +33,9 @@ func (s StatT) Mtim() time.Time { // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // -// Throws an error if the file does not exist +// Throws an error if the file does not exist. +// +// Deprecated: this function is only used internally, and will be removed in the next release. func Stat(path string) (*StatT, error) { fi, err := os.Stat(path) if err != nil { diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go index facfbb3126..b877ecc5a9 100644 --- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -6,7 +6,7 @@ import ( // Lgetxattr retrieves the value of the extended attribute identified by attr // and associated with the given path in the file system. -// It will returns a nil slice and nil error if the xattr is not set. +// It returns a nil slice and nil error if the xattr is not set. 
func Lgetxattr(path string, attr string) ([]byte, error) { sysErr := func(err error) ([]byte, error) { return nil, &XattrError{Op: "lgetxattr", Attr: attr, Path: path, Err: err} diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go index afc84f00bb..af70b3e8a5 100644 --- a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go +++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go @@ -48,7 +48,7 @@ type SizeReaderAt interface { } // NewTailReader scopes the passed in reader to just the last N lines passed in -func NewTailReader(ctx context.Context, r SizeReaderAt, reqLines int) (io.Reader, int, error) { +func NewTailReader(ctx context.Context, r SizeReaderAt, reqLines int) (*io.SectionReader, int, error) { return NewTailReaderWithDelimiter(ctx, r, reqLines, eol) } @@ -56,7 +56,7 @@ func NewTailReader(ctx context.Context, r SizeReaderAt, reqLines int) (io.Reader // In this case a "line" is defined by the passed in delimiter. // // Delimiter lengths should be generally small, no more than 12 bytes -func NewTailReaderWithDelimiter(ctx context.Context, r SizeReaderAt, reqLines int, delimiter []byte) (io.Reader, int, error) { +func NewTailReaderWithDelimiter(ctx context.Context, r SizeReaderAt, reqLines int, delimiter []byte) (*io.SectionReader, int, error) { if reqLines < 1 { return nil, 0, ErrNonPositiveLinesNumber } @@ -71,7 +71,7 @@ func NewTailReaderWithDelimiter(ctx context.Context, r SizeReaderAt, reqLines in ) if int64(len(delimiter)) >= size { - return bytes.NewReader(nil), 0, nil + return io.NewSectionReader(bytes.NewReader(nil), 0, 0), 0, nil } scanner := newScanner(r, delimiter) @@ -92,7 +92,7 @@ func NewTailReaderWithDelimiter(ctx context.Context, r SizeReaderAt, reqLines in tailStart = scanner.Start(ctx) if found == 0 { - return bytes.NewReader(nil), 0, nil + return io.NewSectionReader(bytes.NewReader(nil), 0, 0), 0, nil } if found < reqLines && tailStart != 0 { diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin.go b/vendor/github.com/docker/docker/plugin/v2/plugin.go index 522adeb4de..5712a94847 100644 --- a/vendor/github.com/docker/docker/plugin/v2/plugin.go +++ b/vendor/github.com/docker/docker/plugin/v2/plugin.go @@ -55,6 +55,7 @@ func (p *Plugin) ScopedPath(s string) string { } // Client returns the plugin client. +// // Deprecated: use p.Addr() and manually create the client func (p *Plugin) Client() *plugins.Client { p.mu.RLock() @@ -64,6 +65,7 @@ func (p *Plugin) Client() *plugins.Client { } // SetPClient set the plugin client. 
+// // Deprecated: Hardcoded plugin client is deprecated func (p *Plugin) SetPClient(client *plugins.Client) { p.mu.Lock() diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go index 3c4e143138..0553d7a86e 100644 --- a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go @@ -1,5 +1,5 @@ // FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: -//go:build go1.21 +//go:build go1.22 package v2 // import "github.com/docker/docker/plugin/v2" @@ -9,11 +9,11 @@ import ( "runtime" "strings" - "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/api/types" "github.com/docker/docker/internal/rootless/mountopts" "github.com/docker/docker/internal/sliceutil" "github.com/docker/docker/oci" + "github.com/moby/sys/userns" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) diff --git a/vendor/github.com/docker/docker/volume/mounts/linux_parser.go b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go index 898661a574..507003a086 100644 --- a/vendor/github.com/docker/docker/volume/mounts/linux_parser.go +++ b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go @@ -8,7 +8,6 @@ import ( "strings" "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" ) @@ -329,9 +328,8 @@ func (p *linuxParser) parseMountSpec(cfg mount.Mount, validateBindSourceExists b switch cfg.Type { case mount.TypeVolume: - if cfg.Source == "" { - mp.Name = stringid.GenerateRandomID() - } else { + if cfg.Source != "" { + // non-anonymous volume mp.Name = cfg.Source } mp.CopyData = p.DefaultCopyMode() diff --git a/vendor/github.com/docker/docker/volume/mounts/windows_parser.go b/vendor/github.com/docker/docker/volume/mounts/windows_parser.go index c3a6c6bb69..062ede0356 100644 --- a/vendor/github.com/docker/docker/volume/mounts/windows_parser.go +++ b/vendor/github.com/docker/docker/volume/mounts/windows_parser.go @@ -10,7 +10,6 @@ import ( "strings" "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/pkg/stringid" ) // NewWindowsParser creates a parser with Windows semantics. @@ -393,9 +392,8 @@ func (p *windowsParser) parseMountSpec(cfg mount.Mount, convertTargetToBackslash switch cfg.Type { case mount.TypeVolume: - if cfg.Source == "" { - mp.Name = stringid.GenerateRandomID() - } else { + if cfg.Source != "" { + // non-anonymous volume mp.Name = cfg.Source } mp.CopyData = p.DefaultCopyMode() diff --git a/vendor/github.com/moby/sys/userns/LICENSE b/vendor/github.com/moby/sys/userns/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/moby/sys/userns/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/moby/sys/userns/userns.go b/vendor/github.com/moby/sys/userns/userns.go
new file mode 100644
index 0000000000..56b24c44ad
--- /dev/null
+++ b/vendor/github.com/moby/sys/userns/userns.go
@@ -0,0 +1,16 @@
+// Package userns provides utilities to detect whether we are currently running
+// in a Linux user namespace.
+//
+// This code was migrated from [libcontainer/runc], which based its implementation
+// on code from [lcx/incus].
+//
+// [libcontainer/runc]: https://github.com/opencontainers/runc/blob/3778ae603c706494fd1e2c2faf83b406e38d687d/libcontainer/userns/userns_linux.go#L12-L49
+// [lcx/incus]: https://github.com/lxc/incus/blob/e45085dd42f826b3c8c3228e9733c0b6f998eafe/shared/util.go#L678-L700
+package userns
+
+// RunningInUserNS detects whether we are currently running in a Linux
+// user namespace and memoizes the result. It returns false on non-Linux
+// platforms.
+func RunningInUserNS() bool {
+	return inUserNS()
+}
diff --git a/vendor/github.com/moby/sys/userns/userns_linux.go b/vendor/github.com/moby/sys/userns/userns_linux.go
new file mode 100644
index 0000000000..87c1c38eec
--- /dev/null
+++ b/vendor/github.com/moby/sys/userns/userns_linux.go
@@ -0,0 +1,53 @@
+package userns
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"sync"
+)
+
+var inUserNS = sync.OnceValue(runningInUserNS)
+
+// runningInUserNS detects whether we are currently running in a user namespace.
+//
+// This code was migrated from [libcontainer/runc] and based on an implementation
+// from [lcx/incus].
+//
+// [libcontainer/runc]: https://github.com/opencontainers/runc/blob/3778ae603c706494fd1e2c2faf83b406e38d687d/libcontainer/userns/userns_linux.go#L12-L49
+// [lcx/incus]: https://github.com/lxc/incus/blob/e45085dd42f826b3c8c3228e9733c0b6f998eafe/shared/util.go#L678-L700
+func runningInUserNS() bool {
+	file, err := os.Open("/proc/self/uid_map")
+	if err != nil {
+		// This kernel-provided file only exists if user namespaces are supported.
+		return false
+	}
+	defer file.Close()
+
+	buf := bufio.NewReader(file)
+	l, _, err := buf.ReadLine()
+	if err != nil {
+		return false
+	}
+
+	return uidMapInUserNS(string(l))
+}
+
+func uidMapInUserNS(uidMap string) bool {
+	if uidMap == "" {
+		// File exist but empty (the initial state when userns is created,
+		// see user_namespaces(7)).
+		return true
+	}
+
+	var a, b, c int64
+	if _, err := fmt.Sscanf(uidMap, "%d %d %d", &a, &b, &c); err != nil {
+		// Assume we are in a regular, non user namespace.
+		return false
+	}
+
+	// As per user_namespaces(7), /proc/self/uid_map of
+	// the initial user namespace shows 0 0 4294967295.
+	initNS := a == 0 && b == 0 && c == 4294967295
+	return !initNS
+}
diff --git a/vendor/github.com/moby/sys/userns/userns_linux_fuzzer.go b/vendor/github.com/moby/sys/userns/userns_linux_fuzzer.go
new file mode 100644
index 0000000000..26ba2e16ec
--- /dev/null
+++ b/vendor/github.com/moby/sys/userns/userns_linux_fuzzer.go
@@ -0,0 +1,8 @@
+//go:build linux && gofuzz
+
+package userns
+
+func FuzzUIDMap(uidmap []byte) int {
+	_ = uidMapInUserNS(string(uidmap))
+	return 1
+}
diff --git a/vendor/github.com/moby/sys/userns/userns_unsupported.go b/vendor/github.com/moby/sys/userns/userns_unsupported.go
new file mode 100644
index 0000000000..8ed83072c2
--- /dev/null
+++ b/vendor/github.com/moby/sys/userns/userns_unsupported.go
@@ -0,0 +1,6 @@
+//go:build !linux
+
+package userns
+
+// inUserNS is a stub for non-Linux systems. Always returns false.
+func inUserNS() bool { return false }
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1f3ab8ce7a..280ef44178 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -503,7 +503,7 @@ github.com/docker/distribution
 github.com/docker/distribution/digestset
 github.com/docker/distribution/reference
 github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v27.0.3+incompatible
+# github.com/docker/docker v27.4.1+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -561,7 +561,6 @@ github.com/docker/docker/oci/caps
 github.com/docker/docker/pkg/archive
 github.com/docker/docker/pkg/broadcaster
 github.com/docker/docker/pkg/chrootarchive
-github.com/docker/docker/pkg/containerfs
 github.com/docker/docker/pkg/homedir
 github.com/docker/docker/pkg/idtools
 github.com/docker/docker/pkg/ioutils
@@ -902,6 +901,9 @@ github.com/moby/sys/symlink
 # github.com/moby/sys/user v0.1.0
 ## explicit; go 1.17
 github.com/moby/sys/user
+# github.com/moby/sys/userns v0.1.0
+## explicit; go 1.21
+github.com/moby/sys/userns
 # github.com/moby/term v0.5.0
 ## explicit; go 1.18
 github.com/moby/term
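
Reviewer note (not part of the patch): the newly vendored userns package exposes a single exported entry point, RunningInUserNS, shown in the hunks above. A minimal sketch of how a caller might consume it, assuming only the vendored module path github.com/moby/sys/userns and the behavior documented in the diff (memoized read of /proc/self/uid_map, always false off Linux):

// Illustrative example only; this program is not part of the patch.
package main

import (
	"fmt"

	"github.com/moby/sys/userns"
)

func main() {
	// Per the vendored code above, this reads /proc/self/uid_map once
	// (memoized via sync.OnceValue) and reports true unless the mapping is
	// the initial namespace's "0 0 4294967295" line; on non-Linux platforms
	// the stub always returns false.
	if userns.RunningInUserNS() {
		fmt.Println("running inside a Linux user namespace")
	} else {
		fmt.Println("running in the initial user namespace (or on a non-Linux platform)")
	}
}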