-rw-r--r--  cmd/podman/spec.go  2
-rw-r--r--  cmd/podman/spec_test.go  2
-rw-r--r--  vendor/github.com/containers/image/copy/copy.go  11
-rw-r--r--  vendor/github.com/containers/image/directory/directory_src.go  5
-rw-r--r--  vendor/github.com/containers/image/docker/archive/src.go  5
-rw-r--r--  vendor/github.com/containers/image/docker/daemon/daemon_src.go  5
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_src.go  5
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/dest.go  6
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/src.go  22
-rw-r--r--  vendor/github.com/containers/image/docker/tarfile/types.go  38
-rw-r--r--  vendor/github.com/containers/image/image/docker_list.go  2
-rw-r--r--  vendor/github.com/containers/image/image/docker_schema1.go  264
-rw-r--r--  vendor/github.com/containers/image/image/docker_schema2.go  152
-rw-r--r--  vendor/github.com/containers/image/image/manifest.go  76
-rw-r--r--  vendor/github.com/containers/image/image/memory.go  7
-rw-r--r--  vendor/github.com/containers/image/image/oci.go  116
-rw-r--r--  vendor/github.com/containers/image/image/sourced.go  4
-rw-r--r--  vendor/github.com/containers/image/image/unparsed.go  7
-rw-r--r--  vendor/github.com/containers/image/manifest/docker_schema1.go  310
-rw-r--r--  vendor/github.com/containers/image/manifest/docker_schema2.go  251
-rw-r--r--  vendor/github.com/containers/image/manifest/manifest.go  89
-rw-r--r--  vendor/github.com/containers/image/manifest/oci.go  120
-rw-r--r--  vendor/github.com/containers/image/oci/archive/oci_src.go  5
-rw-r--r--  vendor/github.com/containers/image/oci/layout/oci_src.go  5
-rw-r--r--  vendor/github.com/containers/image/openshift/openshift.go  5
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_dest.go  57
-rw-r--r--  vendor/github.com/containers/image/ostree/ostree_src.go  5
-rw-r--r--  vendor/github.com/containers/image/storage/storage_image.go  950
-rw-r--r--  vendor/github.com/containers/image/storage/storage_reference.go  69
-rw-r--r--  vendor/github.com/containers/image/storage/storage_transport.go  195
-rw-r--r--  vendor/github.com/containers/image/tarball/tarball_src.go  4
-rw-r--r--  vendor/github.com/containers/image/types/types.go  8
-rw-r--r--  vendor/github.com/containers/image/vendor.conf  5
-rw-r--r--  vendor/github.com/containers/storage/drivers/overlay/overlay.go  6
-rw-r--r--  vendor/github.com/containers/storage/drivers/vfs/driver.go  5
-rw-r--r--  vendor/github.com/containers/storage/images.go  103
-rw-r--r--  vendor/github.com/containers/storage/images_ffjson.go  60
-rw-r--r--  vendor/github.com/containers/storage/pkg/archive/example_changes.go  97
-rw-r--r--  vendor/github.com/containers/storage/store.go  42
-rw-r--r--  vendor/github.com/containers/storage/vendor.conf  2
-rw-r--r--  vendor/github.com/coreos/go-systemd/daemon/sdnotify.go  63
-rw-r--r--  vendor/github.com/coreos/go-systemd/daemon/watchdog.go  72
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/buffer.go  51
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go  186
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/fswriters.go  162
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/readers.go  154
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go  10
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go  18
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go  92
-rw-r--r--  vendor/github.com/docker/docker/pkg/ioutils/writers.go  66
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/devices/devices_linux.go  100
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go  3
-rw-r--r--  vendor/github.com/opencontainers/runc/libcontainer/devices/number.go  24
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/filepath/abs.go  52
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/filepath/ancestor.go  32
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/filepath/clean.go  56
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/filepath/doc.go  6
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/filepath/join.go  9
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/filepath/separator.go  9
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/generate/generate.go  207
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/specerror/bundle.go  29
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/specerror/config-linux.go  134
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/specerror/config-windows.go  32
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/specerror/config.go  188
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/specerror/error.go  119
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/specerror/runtime-linux.go  23
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/specerror/runtime.go  179
-rw-r--r--  vendor/github.com/opencontainers/runtime-tools/validate/validate.go  312
-rw-r--r--  vendor/github.com/stretchr/testify/require/doc.go  28
-rw-r--r--  vendor/github.com/stretchr/testify/require/forward_requirements.go  16
-rw-r--r--  vendor/github.com/stretchr/testify/require/require.go  429
-rw-r--r--  vendor/github.com/stretchr/testify/require/require_forward.go  353
-rw-r--r--  vendor/github.com/stretchr/testify/require/requirements.go  9
-rw-r--r--  vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt (renamed from vendor/k8s.io/utils/LICENSE)  2
-rw-r--r--  vendor/github.com/xeipuuv/gojsonpointer/README.md  8
-rw-r--r--  vendor/github.com/xeipuuv/gojsonpointer/pointer.go  190
-rw-r--r--  vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt  202
-rw-r--r--  vendor/github.com/xeipuuv/gojsonreference/README.md  10
-rw-r--r--  vendor/github.com/xeipuuv/gojsonreference/reference.go  141
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt  202
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/README.md  294
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/errors.go  283
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/format_checkers.go  250
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/internalLog.go  37
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/jsonContext.go  72
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go  341
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/locales.go  286
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/result.go  172
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/schema.go  928
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/schemaPool.go  109
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go  67
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/schemaType.go  83
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/subSchema.go  227
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/types.go  58
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/utils.go  208
-rw-r--r--  vendor/github.com/xeipuuv/gojsonschema/validation.go  844
-rw-r--r--  vendor/k8s.io/utils/README.md  51
-rw-r--r--  vendor/k8s.io/utils/exec/doc.go  18
-rw-r--r--  vendor/k8s.io/utils/exec/exec.go  202
99 files changed, 8279 insertions(+), 3351 deletions(-)
diff --git a/cmd/podman/spec.go b/cmd/podman/spec.go
index adfdf7347..b13556d93 100644
--- a/cmd/podman/spec.go
+++ b/cmd/podman/spec.go
@@ -483,7 +483,7 @@ func (c *createConfig) GetVolumeMounts() ([]spec.Mount, error) {
}
}
if rootProp == "" {
- options = append(options, "rprivate")
+ options = append(options, "private")
}
m = append(m, spec.Mount{
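The two hunks above change podman's default root propagation for user volumes from "rprivate" to "private" when the user supplies no propagation option. A minimal self-contained sketch of the resulting option handling (the helper name is hypothetical, for illustration only):

```go
package main

import "fmt"

// applyDefaultPropagation mirrors the logic in GetVolumeMounts above: if no
// propagation option was given, default to "private" (previously "rprivate").
func applyDefaultPropagation(options []string, rootProp string) []string {
	if rootProp == "" {
		options = append(options, "private")
	}
	return options
}

func main() {
	fmt.Println(applyDefaultPropagation([]string{"ro", "rbind"}, ""))
	// [ro rbind private]
}
```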
diff --git a/cmd/podman/spec_test.go b/cmd/podman/spec_test.go
index 01e1a4ad3..586c09656 100644
--- a/cmd/podman/spec_test.go
+++ b/cmd/podman/spec_test.go
@@ -13,7 +13,7 @@ func TestCreateConfig_GetVolumeMounts(t *testing.T) {
Destination: "/foobar",
Type: "bind",
Source: "foobar",
- Options: []string{"ro", "rbind", "rprivate"},
+ Options: []string{"ro", "rbind", "private"},
}
config := createConfig{
Volumes: []string{"foobar:/foobar:ro"},
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
index ac97cad95..29065e031 100644
--- a/vendor/github.com/containers/image/copy/copy.go
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -368,6 +368,15 @@ func (ic *imageCopier) copyLayers() error {
srcInfos := ic.src.LayerInfos()
destInfos := []types.BlobInfo{}
diffIDs := []digest.Digest{}
+ updatedSrcInfos := ic.src.LayerInfosForCopy()
+ srcInfosUpdated := false
+ if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
+ if !ic.canModifyManifest {
+ return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden")
+ }
+ srcInfos = updatedSrcInfos
+ srcInfosUpdated = true
+ }
for _, srcLayer := range srcInfos {
var (
destInfo types.BlobInfo
@@ -396,7 +405,7 @@ func (ic *imageCopier) copyLayers() error {
if ic.diffIDsAreNeeded {
ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
}
- if layerDigestsDiffer(srcInfos, destInfos) {
+ if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
ic.manifestUpdates.LayerInfos = destInfos
}
return nil
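The copy path now asks the image source for substitute layer information via LayerInfosForCopy() and, when it differs from what the manifest says, insists on being allowed to rewrite the manifest. A reduced sketch of that comparison, with types.BlobInfo shrunk to a stand-in struct:

```go
package main

import (
	"fmt"
	"reflect"
)

// BlobInfo is a stand-in for types.BlobInfo, reduced to the digest.
type BlobInfo struct{ Digest string }

// pickSrcInfos mirrors the pattern above: prefer the source's updated layer
// infos, but only when the caller may modify the manifest; report whether a
// substitution happened so the manifest update can be forced.
func pickSrcInfos(manifestInfos, updated []BlobInfo, canModifyManifest bool) ([]BlobInfo, bool, error) {
	if updated != nil && !reflect.DeepEqual(manifestInfos, updated) {
		if !canModifyManifest {
			return nil, false, fmt.Errorf("internal error: updated manifest needed but forbidden")
		}
		return updated, true, nil
	}
	return manifestInfos, false, nil
}

func main() {
	m := []BlobInfo{{"sha256:aaa"}}
	u := []BlobInfo{{"sha256:bbb"}}
	infos, changed, err := pickSrcInfos(m, u, true)
	fmt.Println(infos, changed, err) // [{sha256:bbb}] true <nil>
}
```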
diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go
index b362f5422..0a8acf6bf 100644
--- a/vendor/github.com/containers/image/directory/directory_src.go
+++ b/vendor/github.com/containers/image/directory/directory_src.go
@@ -82,3 +82,8 @@ func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *dige
}
return signatures, nil
}
+
+// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified.
+func (s *dirImageSource) LayerInfosForCopy() []types.BlobInfo {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go
index aebcaa82a..b2ffd965d 100644
--- a/vendor/github.com/containers/image/docker/archive/src.go
+++ b/vendor/github.com/containers/image/docker/archive/src.go
@@ -34,3 +34,8 @@ func (s *archiveImageSource) Reference() types.ImageReference {
func (s *archiveImageSource) Close() error {
return nil
}
+
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *archiveImageSource) LayerInfosForCopy() []types.BlobInfo {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
index 3bd4ad26d..5cf7679b1 100644
--- a/vendor/github.com/containers/image/docker/daemon/daemon_src.go
+++ b/vendor/github.com/containers/image/docker/daemon/daemon_src.go
@@ -81,3 +81,8 @@ func (s *daemonImageSource) Reference() types.ImageReference {
func (s *daemonImageSource) Close() error {
return os.Remove(s.tarCopyPath)
}
+
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *daemonImageSource) LayerInfosForCopy() []types.BlobInfo {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index 259de0db1..63bfe8aa4 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -52,6 +52,11 @@ func (s *dockerImageSource) Close() error {
return nil
}
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *dockerImageSource) LayerInfosForCopy() []types.BlobInfo {
+ return nil
+}
+
// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
func simplifyContentType(contentType string) string {
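The simplifyContentType function visible in the context above strips parameters from an HTTP media type, per its comment. A sketch of that documented behavior using the standard library's mime package (an illustration under those assumptions, not the vendored code verbatim):

```go
package main

import (
	"fmt"
	"mime"
)

// simplifyContentType drops parameters from an HTTP media type; an empty
// input is returned unchanged, and unparsable values collapse to "".
func simplifyContentType(contentType string) string {
	if contentType == "" {
		return ""
	}
	mimeType, _, err := mime.ParseMediaType(contentType)
	if err != nil {
		return ""
	}
	return mimeType
}

func main() {
	fmt.Println(simplifyContentType("application/json; charset=utf-8")) // application/json
}
```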
diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go
index 6e042582e..eb11ca866 100644
--- a/vendor/github.com/containers/image/docker/tarfile/dest.go
+++ b/vendor/github.com/containers/image/docker/tarfile/dest.go
@@ -167,7 +167,7 @@ func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
func (d *Destination) PutManifest(m []byte) error {
// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
// so the caller trying a different manifest kind would be pointless.
- var man schema2Manifest
+ var man manifest.Schema2
if err := json.Unmarshal(m, &man); err != nil {
return errors.Wrap(err, "Error parsing manifest")
}
@@ -176,12 +176,12 @@ func (d *Destination) PutManifest(m []byte) error {
}
layerPaths := []string{}
- for _, l := range man.Layers {
+ for _, l := range man.LayersDescriptors {
layerPaths = append(layerPaths, l.Digest.String())
}
items := []ManifestItem{{
- Config: man.Config.Digest.String(),
+ Config: man.ConfigDescriptor.Digest.String(),
RepoTags: []string{d.repoTag},
Layers: layerPaths,
Parent: "",
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
index e2252c640..a18e21058 100644
--- a/vendor/github.com/containers/image/docker/tarfile/src.go
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -24,8 +24,8 @@ type Source struct {
tarManifest *ManifestItem // nil if not available yet.
configBytes []byte
configDigest digest.Digest
- orderedDiffIDList []diffID
- knownLayers map[diffID]*layerInfo
+ orderedDiffIDList []digest.Digest
+ knownLayers map[digest.Digest]*layerInfo
// Other state
generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
}
@@ -156,7 +156,7 @@ func (s *Source) ensureCachedDataIsPresent() error {
if err != nil {
return err
}
- var parsedConfig image // Most fields ommitted, we only care about layer DiffIDs.
+ var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
}
@@ -194,12 +194,12 @@ func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
return s.loadTarManifest()
}
-func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) {
+func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
// Collect layer data available in manifest and config.
if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
}
- knownLayers := map[diffID]*layerInfo{}
+ knownLayers := map[digest.Digest]*layerInfo{}
unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
for i, diffID := range parsedConfig.RootFS.DiffIDs {
if _, ok := knownLayers[diffID]; ok {
@@ -260,23 +260,23 @@ func (s *Source) GetManifest(instanceDigest *digest.Digest) ([]byte, string, err
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, "", err
}
- m := schema2Manifest{
+ m := manifest.Schema2{
SchemaVersion: 2,
MediaType: manifest.DockerV2Schema2MediaType,
- Config: distributionDescriptor{
+ ConfigDescriptor: manifest.Schema2Descriptor{
MediaType: manifest.DockerV2Schema2ConfigMediaType,
Size: int64(len(s.configBytes)),
Digest: s.configDigest,
},
- Layers: []distributionDescriptor{},
+ LayersDescriptors: []manifest.Schema2Descriptor{},
}
for _, diffID := range s.orderedDiffIDList {
li, ok := s.knownLayers[diffID]
if !ok {
return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
}
- m.Layers = append(m.Layers, distributionDescriptor{
- Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
+ m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
+ Digest: diffID, // diffID is a digest of the uncompressed tarball
MediaType: manifest.DockerV2Schema2LayerMediaType,
Size: li.size,
})
@@ -312,7 +312,7 @@ func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
}
- if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball,
+ if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
stream, err := s.openTarComponent(li.path)
if err != nil {
return nil, 0, err
diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go
index f16cc8c62..2aa567545 100644
--- a/vendor/github.com/containers/image/docker/tarfile/types.go
+++ b/vendor/github.com/containers/image/docker/tarfile/types.go
@@ -1,6 +1,9 @@
package tarfile
-import "github.com/opencontainers/go-digest"
+import (
+ "github.com/containers/image/manifest"
+ "github.com/opencontainers/go-digest"
+)
// Various data structures.
@@ -18,37 +21,8 @@ type ManifestItem struct {
Config string
RepoTags []string
Layers []string
- Parent imageID `json:",omitempty"`
- LayerSources map[diffID]distributionDescriptor `json:",omitempty"`
+ Parent imageID `json:",omitempty"`
+ LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"`
}
type imageID string
-type diffID digest.Digest
-
-// Based on github.com/docker/distribution/blobs.go
-type distributionDescriptor struct {
- MediaType string `json:"mediaType,omitempty"`
- Size int64 `json:"size,omitempty"`
- Digest digest.Digest `json:"digest,omitempty"`
- URLs []string `json:"urls,omitempty"`
-}
-
-// Based on github.com/docker/distribution/manifest/schema2/manifest.go
-// FIXME: We are repeating this all over the place; make a public copy?
-type schema2Manifest struct {
- SchemaVersion int `json:"schemaVersion"`
- MediaType string `json:"mediaType,omitempty"`
- Config distributionDescriptor `json:"config"`
- Layers []distributionDescriptor `json:"layers"`
-}
-
-// Based on github.com/docker/docker/image/image.go
-// MOST CONTENT OMITTED AS UNNECESSARY
-type image struct {
- RootFS *rootFS `json:"rootfs,omitempty"`
-}
-
-type rootFS struct {
- Type string `json:"type"`
- DiffIDs []diffID `json:"diff_ids,omitempty"`
-}
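With the local diffID, distributionDescriptor, schema2Manifest, and image types deleted, ManifestItem's LayerSources is keyed by digest.Digest and valued by manifest.Schema2Descriptor directly. A stdlib-only sketch of the resulting JSON shape, using hypothetical mirror types in place of the real packages:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Digest and Schema2Descriptor are stand-ins for digest.Digest and
// manifest.Schema2Descriptor.
type Digest string

type Schema2Descriptor struct {
	MediaType string `json:"mediaType,omitempty"`
	Size      int64  `json:"size,omitempty"`
	Digest    Digest `json:"digest,omitempty"`
}

// ManifestItem mirrors one entry of a docker-archive manifest.json.
type ManifestItem struct {
	Config       string
	RepoTags     []string
	Layers       []string
	LayerSources map[Digest]Schema2Descriptor `json:",omitempty"`
}

func main() {
	data := `[{"Config":"cfg.json","RepoTags":["example:latest"],"Layers":["l1.tar"],
	  "LayerSources":{"sha256:aaa":{"mediaType":"application/vnd.docker.image.rootfs.diff.tar","size":123,"digest":"sha256:bbb"}}}]`
	var items []ManifestItem
	if err := json.Unmarshal([]byte(data), &items); err != nil {
		panic(err)
	}
	fmt.Println(items[0].LayerSources["sha256:aaa"].Size) // 123
}
```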
diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go
index 53af9fbd7..412261ddb 100644
--- a/vendor/github.com/containers/image/image/docker_list.go
+++ b/vendor/github.com/containers/image/image/docker_list.go
@@ -22,7 +22,7 @@ type platformSpec struct {
// A manifestDescriptor references a platform-specific manifest.
type manifestDescriptor struct {
- descriptor
+ manifest.Schema2Descriptor
Platform platformSpec `json:"platform"`
}
diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go
index 4c3c78acd..c6a6989de 100644
--- a/vendor/github.com/containers/image/image/docker_schema1.go
+++ b/vendor/github.com/containers/image/image/docker_schema1.go
@@ -2,9 +2,6 @@ package image
import (
"encoding/json"
- "regexp"
- "strings"
- "time"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
@@ -14,87 +11,25 @@ import (
"github.com/pkg/errors"
)
-var (
- validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
-)
-
-type fsLayersSchema1 struct {
- BlobSum digest.Digest `json:"blobSum"`
-}
-
-type historySchema1 struct {
- V1Compatibility string `json:"v1Compatibility"`
-}
-
-// historySchema1 is a string containing this. It is similar to v1Image but not the same, in particular note the ThrowAway field.
-type v1Compatibility struct {
- ID string `json:"id"`
- Parent string `json:"parent,omitempty"`
- Comment string `json:"comment,omitempty"`
- Created time.Time `json:"created"`
- ContainerConfig struct {
- Cmd []string
- } `json:"container_config,omitempty"`
- Author string `json:"author,omitempty"`
- ThrowAway bool `json:"throwaway,omitempty"`
-}
-
type manifestSchema1 struct {
- Name string `json:"name"`
- Tag string `json:"tag"`
- Architecture string `json:"architecture"`
- FSLayers []fsLayersSchema1 `json:"fsLayers"`
- History []historySchema1 `json:"history"`
- SchemaVersion int `json:"schemaVersion"`
+ m *manifest.Schema1
}
-func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
- mschema1 := &manifestSchema1{}
- if err := json.Unmarshal(manifest, mschema1); err != nil {
- return nil, err
- }
- if mschema1.SchemaVersion != 1 {
- return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
- }
- if len(mschema1.FSLayers) != len(mschema1.History) {
- return nil, errors.New("length of history not equal to number of layers")
- }
- if len(mschema1.FSLayers) == 0 {
- return nil, errors.New("no FSLayers in manifest")
- }
-
- if err := fixManifestLayers(mschema1); err != nil {
+func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.Schema1FromManifest(manifestBlob)
+ if err != nil {
return nil, err
}
- return mschema1, nil
+ return &manifestSchema1{m: m}, nil
}
// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
-func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest {
- var name, tag string
- if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
- name = reference.Path(ref)
- if tagged, ok := ref.(reference.NamedTagged); ok {
- tag = tagged.Tag()
- }
- }
- return &manifestSchema1{
- Name: name,
- Tag: tag,
- Architecture: architecture,
- FSLayers: fsLayers,
- History: history,
- SchemaVersion: 1,
- }
+func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest {
+ return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)}
}
func (m *manifestSchema1) serialize() ([]byte, error) {
- // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
- unsigned, err := json.Marshal(*m)
- if err != nil {
- return nil, err
- }
- return manifest.AddDummyV2S1Signature(unsigned)
+ return m.m.Serialize()
}
func (m *manifestSchema1) manifestMIMEType() string {
@@ -104,7 +39,7 @@ func (m *manifestSchema1) manifestMIMEType() string {
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
- return types.BlobInfo{}
+ return m.m.ConfigInfo()
}
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
@@ -128,11 +63,7 @@ func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
- layers := make([]types.BlobInfo, len(m.FSLayers))
- for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
- layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
- }
- return layers
+ return m.m.LayerInfos()
}
// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -153,25 +84,11 @@ func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named)
} else {
tag = ""
}
- return m.Name != name || m.Tag != tag
+ return m.m.Name != name || m.m.Tag != tag
}
func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) {
- v1 := &v1Image{}
- if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil {
- return nil, err
- }
- i := &types.ImageInspectInfo{
- Tag: m.Tag,
- DockerVersion: v1.DockerVersion,
- Created: v1.Created,
- Architecture: v1.Architecture,
- Os: v1.OS,
- }
- if v1.Config != nil {
- i.Labels = v1.Config.Labels
- }
- return i, nil
+ return m.m.Inspect(nil)
}
// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
@@ -184,25 +101,18 @@ func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
- copy := *m
+ copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
if options.LayerInfos != nil {
- // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
- if len(copy.FSLayers) != len(options.LayerInfos) {
- return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
- }
- for i, info := range options.LayerInfos {
- // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
- // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
- // So, we don't bother recomputing the IDs in m.History.V1Compatibility.
- copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
}
}
if options.EmbeddedDockerReference != nil {
- copy.Name = reference.Path(options.EmbeddedDockerReference)
+ copy.m.Name = reference.Path(options.EmbeddedDockerReference)
if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
- copy.Tag = tagged.Tag()
+ copy.m.Tag = tagged.Tag()
} else {
- copy.Tag = ""
+ copy.m.Tag = ""
}
}
@@ -234,102 +144,32 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
return memoryImageFromManifest(&copy), nil
}
-// fixManifestLayers, after validating the supplied manifest
-// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
-// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
-// both from manifest.History and manifest.FSLayers).
-// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries
-// (for Dockerfile operations which change the configuration but not the filesystem).
-func fixManifestLayers(manifest *manifestSchema1) error {
- type imageV1 struct {
- ID string
- Parent string
- }
- // Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History)
- imgs := make([]*imageV1, len(manifest.FSLayers))
- for i := range manifest.FSLayers {
- img := &imageV1{}
-
- if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil {
- return err
- }
-
- imgs[i] = img
- if err := validateV1ID(img.ID); err != nil {
- return err
- }
- }
- if imgs[len(imgs)-1].Parent != "" {
- return errors.New("Invalid parent ID in the base layer of the image")
- }
- // check general duplicates to error instead of a deadlock
- idmap := make(map[string]struct{})
- var lastID string
- for _, img := range imgs {
- // skip IDs that appear after each other, we handle those later
- if _, exists := idmap[img.ID]; img.ID != lastID && exists {
- return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
- }
- lastID = img.ID
- idmap[lastID] = struct{}{}
- }
- // backwards loop so that we keep the remaining indexes after removing items
- for i := len(imgs) - 2; i >= 0; i-- {
- if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
- manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
- manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
- } else if imgs[i].Parent != imgs[i+1].ID {
- return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
- }
- }
- return nil
-}
-
-func validateV1ID(id string) error {
- if ok := validHex.MatchString(id); !ok {
- return errors.Errorf("image ID %q is invalid", id)
- }
- return nil
-}
-
// Based on github.com/docker/docker/distribution/pull_v2.go
func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) {
- if len(m.History) == 0 {
+ if len(m.m.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
}
- if len(m.History) != len(m.FSLayers) {
- return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
+ if len(m.m.History) != len(m.m.FSLayers) {
+ return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers))
}
- if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) {
- return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
+ if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
+ return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
}
- if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) {
- return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
+ if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
+ return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
}
- rootFS := rootFS{
- Type: "layers",
- DiffIDs: []digest.Digest{},
- BaseLayer: "",
- }
- var layers []descriptor
- history := make([]imageHistory, len(m.History))
- for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- {
- v2Index := (len(m.History) - 1) - v1Index
+ // Build a list of the diffIDs for the non-empty layers.
+ diffIDs := []digest.Digest{}
+ var layers []manifest.Schema2Descriptor
+ for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- {
+ v2Index := (len(m.m.History) - 1) - v1Index
- var v1compat v1Compatibility
- if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil {
+ var v1compat manifest.Schema1V1Compatibility
+ if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil {
return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
}
- history[v2Index] = imageHistory{
- Created: v1compat.Created,
- Author: v1compat.Author,
- CreatedBy: strings.Join(v1compat.ContainerConfig.Cmd, " "),
- Comment: v1compat.Comment,
- EmptyLayer: v1compat.ThrowAway,
- }
-
if !v1compat.ThrowAway {
var size int64
if uploadedLayerInfos != nil {
@@ -339,19 +179,19 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
if layerDiffIDs != nil {
d = layerDiffIDs[v2Index]
}
- layers = append(layers, descriptor{
+ layers = append(layers, manifest.Schema2Descriptor{
MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
Size: size,
- Digest: m.FSLayers[v1Index].BlobSum,
+ Digest: m.m.FSLayers[v1Index].BlobSum,
})
- rootFS.DiffIDs = append(rootFS.DiffIDs, d)
+ diffIDs = append(diffIDs, d)
}
}
- configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history)
+ configJSON, err := m.m.ToSchema2(diffIDs)
if err != nil {
return nil, err
}
- configDescriptor := descriptor{
+ configDescriptor := manifest.Schema2Descriptor{
MediaType: "application/vnd.docker.container.image.v1+json",
Size: int64(len(configJSON)),
Digest: digest.FromBytes(configJSON),
@@ -359,33 +199,3 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil
}
-
-func configJSONFromV1Config(v1ConfigJSON []byte, rootFS rootFS, history []imageHistory) ([]byte, error) {
- // github.com/docker/docker/image/v1/imagev1.go:MakeConfigFromV1Config unmarshals and re-marshals the input if docker_version is < 1.8.3 to remove blank fields;
- // we don't do that here. FIXME? Should we? AFAICT it would only affect the digest value of the schema2 manifest, and we don't particularly need that to be
- // a consistently reproducible value.
-
- // Preserve everything we don't specifically know about.
- // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
- rawContents := map[string]*json.RawMessage{}
- if err := json.Unmarshal(v1ConfigJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
- return nil, err
- }
-
- delete(rawContents, "id")
- delete(rawContents, "parent")
- delete(rawContents, "Size")
- delete(rawContents, "parent_id")
- delete(rawContents, "layer_id")
- delete(rawContents, "throwaway")
-
- updates := map[string]interface{}{"rootfs": rootFS, "history": history}
- for field, value := range updates {
- encoded, err := json.Marshal(value)
- if err != nil {
- return nil, err
- }
- rawContents[field] = (*json.RawMessage)(&encoded)
- }
- return json.Marshal(rawContents)
-}
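The rewritten convertToManifestSchema2 above walks schema1 history in reverse (index 0 is newest) and collects layer descriptors and diffIDs only for entries that are not ThrowAway, i.e. that actually carry a filesystem layer. A reduced sketch of that index dance, with hypothetical stand-in types:

```go
package main

import "fmt"

// historyEntry is a stand-in pairing a schema1 fsLayers blobSum with its
// history entry's ThrowAway flag (true for config-only, empty layers).
type historyEntry struct {
	Blob      string
	ThrowAway bool
}

// nonEmptyLayers walks schema1 history from oldest to newest, keeping blob
// digests only for entries that carry a filesystem layer.
func nonEmptyLayers(history []historyEntry) []string {
	var layers []string
	for v1Index := len(history) - 1; v1Index >= 0; v1Index-- {
		if !history[v1Index].ThrowAway {
			layers = append(layers, history[v1Index].Blob)
		}
	}
	return layers
}

func main() {
	h := []historyEntry{
		{Blob: "sha256:top"},
		{Blob: "sha256:cfgonly", ThrowAway: true},
		{Blob: "sha256:base"},
	}
	fmt.Println(nonEmptyLayers(h)) // [sha256:base sha256:top]
}
```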
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go
index 848e8743c..b43bc17cf 100644
--- a/vendor/github.com/containers/image/image/docker_schema2.go
+++ b/vendor/github.com/containers/image/image/docker_schema2.go
@@ -29,54 +29,44 @@ var gzippedEmptyLayer = []byte{
// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
-type descriptor struct {
- MediaType string `json:"mediaType"`
- Size int64 `json:"size"`
- Digest digest.Digest `json:"digest"`
- URLs []string `json:"urls,omitempty"`
-}
-
type manifestSchema2 struct {
- src types.ImageSource // May be nil if configBlob is not nil
- configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
- SchemaVersion int `json:"schemaVersion"`
- MediaType string `json:"mediaType"`
- ConfigDescriptor descriptor `json:"config"`
- LayersDescriptors []descriptor `json:"layers"`
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
+ m *manifest.Schema2
}
-func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
- v2s2 := manifestSchema2{src: src}
- if err := json.Unmarshal(manifest, &v2s2); err != nil {
+func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.Schema2FromManifest(manifestBlob)
+ if err != nil {
return nil, err
}
- return &v2s2, nil
+ return &manifestSchema2{
+ src: src,
+ m: m,
+ }, nil
}
// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
-func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest {
+func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest {
return &manifestSchema2{
- src: src,
- configBlob: configBlob,
- SchemaVersion: 2,
- MediaType: manifest.DockerV2Schema2MediaType,
- ConfigDescriptor: config,
- LayersDescriptors: layers,
+ src: src,
+ configBlob: configBlob,
+ m: manifest.Schema2FromComponents(config, layers),
}
}
func (m *manifestSchema2) serialize() ([]byte, error) {
- return json.Marshal(*m)
+ return m.m.Serialize()
}
func (m *manifestSchema2) manifestMIMEType() string {
- return m.MediaType
+ return m.m.MediaType
}
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
- return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
+ return m.m.ConfigInfo()
}
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
@@ -105,9 +95,9 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
stream, _, err := m.src.GetBlob(types.BlobInfo{
- Digest: m.ConfigDescriptor.Digest,
- Size: m.ConfigDescriptor.Size,
- URLs: m.ConfigDescriptor.URLs,
+ Digest: m.m.ConfigDescriptor.Digest,
+ Size: m.m.ConfigDescriptor.Size,
+ URLs: m.m.ConfigDescriptor.URLs,
})
if err != nil {
return nil, err
@@ -118,8 +108,8 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
return nil, err
}
computedDigest := digest.FromBytes(blob)
- if computedDigest != m.ConfigDescriptor.Digest {
- return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
+ if computedDigest != m.m.ConfigDescriptor.Digest {
+ return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
}
m.configBlob = blob
}
@@ -130,15 +120,7 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
- blobs := []types.BlobInfo{}
- for _, layer := range m.LayersDescriptors {
- blobs = append(blobs, types.BlobInfo{
- Digest: layer.Digest,
- Size: layer.Size,
- URLs: layer.URLs,
- })
- }
- return blobs
+ return m.m.LayerInfos()
}
// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -149,24 +131,18 @@ func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named)
}
func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) {
- config, err := m.ConfigBlob()
- if err != nil {
- return nil, err
- }
- v1 := &v1Image{}
- if err := json.Unmarshal(config, v1); err != nil {
- return nil, err
- }
- i := &types.ImageInspectInfo{
- DockerVersion: v1.DockerVersion,
- Created: v1.Created,
- Architecture: v1.Architecture,
- Os: v1.OS,
- }
- if v1.Config != nil {
- i.Labels = v1.Config.Labels
+ getter := func(info types.BlobInfo) ([]byte, error) {
+ if info.Digest != m.ConfigInfo().Digest {
+ // Shouldn't ever happen
+ return nil, errors.New("asked for a different config blob")
+ }
+ config, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ return config, nil
}
- return i, nil
+ return m.m.Inspect(getter)
}
// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
@@ -179,17 +155,14 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
- copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
+ copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
+ src: m.src,
+ configBlob: m.configBlob,
+ m: manifest.Schema2Clone(m.m),
+ }
if options.LayerInfos != nil {
- if len(copy.LayersDescriptors) != len(options.LayerInfos) {
- return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
- }
- copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
- for i, info := range options.LayerInfos {
- copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
- copy.LayersDescriptors[i].Digest = info.Digest
- copy.LayersDescriptors[i].Size = info.Size
- copy.LayersDescriptors[i].URLs = info.URLs
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
}
}
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
@@ -207,6 +180,15 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
return memoryImageFromManifest(&copy), nil
}
+func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
+ return imgspecv1.Descriptor{
+ MediaType: d.MediaType,
+ Size: d.Size,
+ Digest: d.Digest,
+ URLs: d.URLs,
+ }
+}
+
func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
configOCI, err := m.OCIConfig()
if err != nil {
@@ -217,18 +199,16 @@ func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
return nil, err
}
- config := descriptorOCI1{
- descriptor: descriptor{
- MediaType: imgspecv1.MediaTypeImageConfig,
- Size: int64(len(configOCIBytes)),
- Digest: digest.FromBytes(configOCIBytes),
- },
+ config := imgspecv1.Descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Size: int64(len(configOCIBytes)),
+ Digest: digest.FromBytes(configOCIBytes),
}
- layers := make([]descriptorOCI1, len(m.LayersDescriptors))
+ layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
for idx := range layers {
- layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]}
- if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
+ layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
+ if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
} else {
// we assume layers are gzip'ed because docker v2s2 only deals with
@@ -247,14 +227,14 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
if err != nil {
return nil, err
}
- imageConfig := &image{}
+ imageConfig := &manifest.Schema2Image{}
if err := json.Unmarshal(configBytes, imageConfig); err != nil {
return nil, err
}
// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
- fsLayers := make([]fsLayersSchema1, len(imageConfig.History))
- history := make([]historySchema1, len(imageConfig.History))
+ fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
+ history := make([]manifest.Schema1History, len(imageConfig.History))
nonemptyLayerIndex := 0
var parentV1ID string // Set in the loop
v1ID := ""
@@ -282,10 +262,10 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
}
blobDigest = gzippedEmptyLayerDigest
} else {
- if nonemptyLayerIndex >= len(m.LayersDescriptors) {
- return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
+ if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
+ return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
}
- blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest
+ blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
nonemptyLayerIndex++
}
@@ -296,7 +276,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
}
v1ID = v
- fakeImage := v1Compatibility{
+ fakeImage := manifest.Schema1V1Compatibility{
ID: v1ID,
Parent: parentV1ID,
Comment: historyEntry.Comment,
@@ -310,8 +290,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
}
- fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest}
- history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)}
+ fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
+ history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
// Note that parentV1ID of the top layer is preserved when exiting this loop
}
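Both this file's imageInspectInfo and the OCI one below now hand the shared manifest package a closure that can fetch exactly one blob, the config, and reject any other digest. A reduced, self-contained sketch of that getter pattern:

```go
package main

import (
	"errors"
	"fmt"
)

// BlobInfo is a stand-in for types.BlobInfo.
type BlobInfo struct{ Digest string }

// configGetter returns a fetch function that serves only the config blob,
// mirroring the guard in the getter closures above.
func configGetter(configDigest string, fetch func() ([]byte, error)) func(BlobInfo) ([]byte, error) {
	return func(info BlobInfo) ([]byte, error) {
		if info.Digest != configDigest {
			// Shouldn't ever happen: Inspect only asks for the config.
			return nil, errors.New("asked for a different config blob")
		}
		return fetch()
	}
}

func main() {
	get := configGetter("sha256:cfg", func() ([]byte, error) {
		return []byte(`{"os":"linux"}`), nil
	})
	b, err := get(BlobInfo{Digest: "sha256:cfg"})
	fmt.Println(string(b), err) // {"os":"linux"} <nil>
}
```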
diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go
index 132fdd58b..cdd4233f7 100644
--- a/vendor/github.com/containers/image/image/manifest.go
+++ b/vendor/github.com/containers/image/image/manifest.go
@@ -1,57 +1,14 @@
package image
import (
- "time"
+ "fmt"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
- "github.com/containers/image/pkg/strslice"
"github.com/containers/image/types"
- "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
-type config struct {
- Cmd strslice.StrSlice
- Labels map[string]string
-}
-
-type v1Image struct {
- ID string `json:"id,omitempty"`
- Parent string `json:"parent,omitempty"`
- Comment string `json:"comment,omitempty"`
- Created time.Time `json:"created"`
- ContainerConfig *config `json:"container_config,omitempty"`
- DockerVersion string `json:"docker_version,omitempty"`
- Author string `json:"author,omitempty"`
- // Config is the configuration of the container received from the client
- Config *config `json:"config,omitempty"`
- // Architecture is the hardware that the image is build and runs on
- Architecture string `json:"architecture,omitempty"`
- // OS is the operating system used to build and run the image
- OS string `json:"os,omitempty"`
-}
-
-type image struct {
- v1Image
- History []imageHistory `json:"history,omitempty"`
- RootFS *rootFS `json:"rootfs,omitempty"`
-}
-
-type imageHistory struct {
- Created time.Time `json:"created"`
- Author string `json:"author,omitempty"`
- CreatedBy string `json:"created_by,omitempty"`
- Comment string `json:"comment,omitempty"`
- EmptyLayer bool `json:"empty_layer,omitempty"`
-}
-
-type rootFS struct {
- Type string `json:"type"`
- DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
- BaseLayer string `json:"base_layer,omitempty"`
-}
-
// genericManifest is an interface for parsing, modifying image manifests and related data.
// Note that the public methods are intended to be a subset of types.Image
// so that embedding a genericManifest into structs works.
@@ -90,11 +47,8 @@ type genericManifest interface {
// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
// If manblob is a manifest list, it implicitly chooses an appropriate image from the list.
func manifestInstanceFromBlob(ctx *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
- switch mt {
- // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
- // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
- // need to happen within the ImageSource.
- case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json":
+ switch manifest.NormalizedMIMEType(mt) {
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
return manifestSchema1FromManifest(manblob)
case imgspecv1.MediaTypeImageManifest:
return manifestOCI1FromManifest(src, manblob)
@@ -102,30 +56,12 @@ func manifestInstanceFromBlob(ctx *types.SystemContext, src types.ImageSource, m
return manifestSchema2FromManifest(src, manblob)
case manifest.DockerV2ListMediaType:
return manifestSchema2FromManifestList(ctx, src, manblob)
- default:
- // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
- // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
- // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
- //
- // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
- // This makes no real sense, but it happens
- // because requests for manifests are
- // redirected to a content distribution
- // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
- return manifestSchema1FromManifest(manblob)
+ default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
}
}
// inspectManifest is an implementation of types.Image.Inspect
func inspectManifest(m genericManifest) (*types.ImageInspectInfo, error) {
- info, err := m.imageInspectInfo()
- if err != nil {
- return nil, err
- }
- layers := m.LayerInfos()
- info.Layers = make([]string, len(layers))
- for i, layer := range layers {
- info.Layers[i] = layer.Digest.String()
- }
- return info, nil
+ return m.imageInspectInfo()
}
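manifestInstanceFromBlob now funnels the incoming MIME type through manifest.NormalizedMIMEType, which, per the deleted comments, maps "application/json" and unrecognized values ("text/plain" from misconfigured CDNs, etc.) to the signed schema1 type so the default case above is effectively unreachable. A sketch of that normalization under those assumptions:

```go
package main

import "fmt"

const (
	DockerV2Schema1MediaType       = "application/vnd.docker.distribution.manifest.v1+json"
	DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
	DockerV2Schema2MediaType       = "application/vnd.docker.distribution.manifest.v2+json"
	DockerV2ListMediaType          = "application/vnd.docker.distribution.manifest.list.v2+json"
	OCIMediaTypeImageManifest      = "application/vnd.oci.image.manifest.v1+json"
)

// normalizedMIMEType sketches the behavior the switch above relies on:
// known manifest types pass through; "application/json" and anything
// unknown fall back to signed schema1, matching docker/distribution's
// historical leniency.
func normalizedMIMEType(input string) string {
	switch input {
	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
		DockerV2Schema2MediaType, DockerV2ListMediaType, OCIMediaTypeImageManifest:
		return input
	default: // includes "application/json", "text/plain", ""
		return DockerV2Schema1SignedMediaType
	}
}

func main() {
	fmt.Println(normalizedMIMEType("application/json"))
}
```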
diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go
index 646dbe249..4639c49a3 100644
--- a/vendor/github.com/containers/image/image/memory.go
+++ b/vendor/github.com/containers/image/image/memory.go
@@ -61,3 +61,10 @@ func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) {
func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) {
return inspectManifest(i.genericManifest)
}
+
+// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (i *memoryImage) LayerInfosForCopy() []types.BlobInfo {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go
index 8fe851837..3c03e49bb 100644
--- a/vendor/github.com/containers/image/image/oci.go
+++ b/vendor/github.com/containers/image/image/oci.go
@@ -12,41 +12,34 @@ import (
"github.com/pkg/errors"
)
-type descriptorOCI1 struct {
- descriptor
- Annotations map[string]string `json:"annotations,omitempty"`
-}
-
type manifestOCI1 struct {
- src types.ImageSource // May be nil if configBlob is not nil
- configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
- SchemaVersion int `json:"schemaVersion"`
- ConfigDescriptor descriptorOCI1 `json:"config"`
- LayersDescriptors []descriptorOCI1 `json:"layers"`
- Annotations map[string]string `json:"annotations,omitempty"`
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of m.Config.
+ m *manifest.OCI1
}
-func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
- oci := manifestOCI1{src: src}
- if err := json.Unmarshal(manifest, &oci); err != nil {
+func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
+ m, err := manifest.OCI1FromManifest(manifestBlob)
+ if err != nil {
return nil, err
}
- return &oci, nil
+ return &manifestOCI1{
+ src: src,
+ m: m,
+ }, nil
}
// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
-func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest {
+func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
return &manifestOCI1{
- src: src,
- configBlob: configBlob,
- SchemaVersion: 2,
- ConfigDescriptor: config,
- LayersDescriptors: layers,
+ src: src,
+ configBlob: configBlob,
+ m: manifest.OCI1FromComponents(config, layers),
}
}
func (m *manifestOCI1) serialize() ([]byte, error) {
- return json.Marshal(*m)
+ return m.m.Serialize()
}
func (m *manifestOCI1) manifestMIMEType() string {
@@ -56,7 +49,7 @@ func (m *manifestOCI1) manifestMIMEType() string {
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
- return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, Annotations: m.ConfigDescriptor.Annotations}
+ return m.m.ConfigInfo()
}
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
@@ -67,9 +60,9 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
}
stream, _, err := m.src.GetBlob(types.BlobInfo{
- Digest: m.ConfigDescriptor.Digest,
- Size: m.ConfigDescriptor.Size,
- URLs: m.ConfigDescriptor.URLs,
+ Digest: m.m.Config.Digest,
+ Size: m.m.Config.Size,
+ URLs: m.m.Config.URLs,
})
if err != nil {
return nil, err
@@ -80,8 +73,8 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
return nil, err
}
computedDigest := digest.FromBytes(blob)
- if computedDigest != m.ConfigDescriptor.Digest {
- return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
+ if computedDigest != m.m.Config.Digest {
+ return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
}
m.configBlob = blob
}
@@ -107,11 +100,7 @@ func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
- blobs := []types.BlobInfo{}
- for _, layer := range m.LayersDescriptors {
- blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType})
- }
- return blobs
+ return m.m.LayerInfos()
}
// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -122,24 +111,18 @@ func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) boo
}
func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) {
- config, err := m.ConfigBlob()
- if err != nil {
- return nil, err
- }
- v1 := &v1Image{}
- if err := json.Unmarshal(config, v1); err != nil {
- return nil, err
- }
- i := &types.ImageInspectInfo{
- DockerVersion: v1.DockerVersion,
- Created: v1.Created,
- Architecture: v1.Architecture,
- Os: v1.OS,
- }
- if v1.Config != nil {
- i.Labels = v1.Config.Labels
+ getter := func(info types.BlobInfo) ([]byte, error) {
+ if info.Digest != m.ConfigInfo().Digest {
+ // Shouldn't ever happen
+ return nil, errors.New("asked for a different config blob")
+ }
+ config, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ return config, nil
}
- return i, nil
+ return m.m.Inspect(getter)
}
// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
@@ -152,18 +135,14 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
- copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
+ copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
+ src: m.src,
+ configBlob: m.configBlob,
+ m: manifest.OCI1Clone(m.m),
+ }
if options.LayerInfos != nil {
- if len(copy.LayersDescriptors) != len(options.LayerInfos) {
- return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
- }
- copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos))
- for i, info := range options.LayerInfos {
- copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
- copy.LayersDescriptors[i].Digest = info.Digest
- copy.LayersDescriptors[i].Size = info.Size
- copy.LayersDescriptors[i].Annotations = info.Annotations
- copy.LayersDescriptors[i].URLs = info.URLs
+ if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
+ return nil, err
}
}
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
@@ -179,17 +158,26 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
return memoryImageFromManifest(&copy), nil
}
+func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
+ return manifest.Schema2Descriptor{
+ MediaType: d.MediaType,
+ Size: d.Size,
+ Digest: d.Digest,
+ URLs: d.URLs,
+ }
+}
+
func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
// Create a copy of the descriptor.
- config := m.ConfigDescriptor.descriptor
+ config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
// The only difference between OCI and DockerSchema2 is the mediatypes. The
// media type of the manifest is handled by manifestSchema2FromComponents.
config.MediaType = manifest.DockerV2Schema2ConfigMediaType
- layers := make([]descriptor, len(m.LayersDescriptors))
+ layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
for idx := range layers {
- layers[idx] = m.LayersDescriptors[idx].descriptor
+ layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
}
diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go
index bdda05a05..3477f341e 100644
--- a/vendor/github.com/containers/image/image/sourced.go
+++ b/vendor/github.com/containers/image/image/sourced.go
@@ -100,3 +100,7 @@ func (i *sourcedImage) Manifest() ([]byte, string, error) {
func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) {
return inspectManifest(i.genericManifest)
}
+
+func (i *sourcedImage) LayerInfosForCopy() []types.BlobInfo {
+ return i.UnparsedImage.LayerInfosForCopy()
+}
diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go
index 0a8f78b66..aff06d8ad 100644
--- a/vendor/github.com/containers/image/image/unparsed.go
+++ b/vendor/github.com/containers/image/image/unparsed.go
@@ -93,3 +93,10 @@ func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
}
return i.cachedSignatures, nil
}
+
+// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest.
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (i *UnparsedImage) LayerInfosForCopy() []types.BlobInfo {
+ return i.src.LayerInfosForCopy()
+}
diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go
new file mode 100644
index 000000000..b1c1cfe9f
--- /dev/null
+++ b/vendor/github.com/containers/image/manifest/docker_schema1.go
@@ -0,0 +1,310 @@
+package manifest
+
+import (
+ "encoding/json"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+ "github.com/docker/docker/api/types/versions"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
+type Schema1FSLayers struct {
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+// Schema1History is an entry of the "history" array in docker/distribution schema 1.
+type Schema1History struct {
+ V1Compatibility string `json:"v1Compatibility"`
+}
+
+// Schema1 is a manifest in docker/distribution schema 1.
+type Schema1 struct {
+ Name string `json:"name"`
+ Tag string `json:"tag"`
+ Architecture string `json:"architecture"`
+ FSLayers []Schema1FSLayers `json:"fsLayers"`
+ History []Schema1History `json:"history"`
+ SchemaVersion int `json:"schemaVersion"`
+}
+
+// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
+type Schema1V1Compatibility struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig struct {
+ Cmd []string
+ } `json:"container_config,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+}
+
+// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
+// (NOTE: The instance is not necessarily a literal representation of the original blob;
+// layers with duplicate IDs are eliminated.)
+func Schema1FromManifest(manifest []byte) (*Schema1, error) {
+ s1 := Schema1{}
+ if err := json.Unmarshal(manifest, &s1); err != nil {
+ return nil, err
+ }
+ if s1.SchemaVersion != 1 {
+ return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
+ }
+ if len(s1.FSLayers) != len(s1.History) {
+ return nil, errors.New("length of history not equal to number of layers")
+ }
+ if len(s1.FSLayers) == 0 {
+ return nil, errors.New("no FSLayers in manifest")
+ }
+ if err := s1.fixManifestLayers(); err != nil {
+ return nil, err
+ }
+ return &s1, nil
+}
+
+// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
+func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 {
+ var name, tag string
+ if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+ name = reference.Path(ref)
+ if tagged, ok := ref.(reference.NamedTagged); ok {
+ tag = tagged.Tag()
+ }
+ }
+ return &Schema1{
+ Name: name,
+ Tag: tag,
+ Architecture: architecture,
+ FSLayers: fsLayers,
+ History: history,
+ SchemaVersion: 1,
+ }
+}
+
+// Schema1Clone creates a copy of the supplied Schema1 manifest.
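+// (This is a shallow copy: the FSLayers and History slices are still shared with src.)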
+func Schema1Clone(src *Schema1) *Schema1 {
+ copy := *src
+ return &copy
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema1) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{}
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema1) LayerInfos() []types.BlobInfo {
+ layers := make([]types.BlobInfo, len(m.FSLayers))
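+ // fsLayers are listed newest-first in the manifest; reverse them into root-first order.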
+ for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+ layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
+ }
+ return layers
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
+ if len(m.FSLayers) != len(layerInfos) {
+ return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
+ }
+ for i, info := range layerInfos {
+ // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
+ // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
+ // So, we don't bother recomputing the IDs in m.History.V1Compatibility.
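+ // The reversed index converts from the root-first ordering of layerInfos to the newest-first ordering of fsLayers.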
+ m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
+ }
+ return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *Schema1) Serialize() ([]byte, error) {
+ // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
+ unsigned, err := json.Marshal(*m)
+ if err != nil {
+ return nil, err
+ }
+ return AddDummyV2S1Signature(unsigned)
+}
+
+// fixManifestLayers, after validating the supplied manifest
+// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History),
+// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates,
+// both from m.History and m.FSLayers).
+// Note that even after this succeeds, m.FSLayers may contain duplicate entries
+// (for Dockerfile operations which change the configuration but not the filesystem).
+func (m *Schema1) fixManifestLayers() error {
+ type imageV1 struct {
+ ID string
+ Parent string
+ }
+ // Per the specification, we can assume that len(m.FSLayers) == len(m.History)
+ imgs := make([]*imageV1, len(m.FSLayers))
+ for i := range m.FSLayers {
+ img := &imageV1{}
+
+ if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
+ return err
+ }
+
+ imgs[i] = img
+ if err := validateV1ID(img.ID); err != nil {
+ return err
+ }
+ }
+ if imgs[len(imgs)-1].Parent != "" {
+ return errors.New("Invalid parent ID in the base layer of the image")
+ }
+ // Error out on non-consecutive duplicate IDs instead of mishandling them; consecutive repeats are deduplicated below.
+ idmap := make(map[string]struct{})
+ var lastID string
+ for _, img := range imgs {
+ // Skip IDs that repeat consecutively; those are handled later.
+ if _, exists := idmap[img.ID]; img.ID != lastID && exists {
+ return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
+ }
+ lastID = img.ID
+ idmap[lastID] = struct{}{}
+ }
+ // backwards loop so that we keep the remaining indexes after removing items
+ for i := len(imgs) - 2; i >= 0; i-- {
+ if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
+ m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
+ m.History = append(m.History[:i], m.History[i+1:]...)
+ } else if imgs[i].Parent != imgs[i+1].ID {
+ return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
+ }
+ }
+ return nil
+}
+
+var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+
+func validateV1ID(id string) error {
+ if ok := validHex.MatchString(id); !ok {
+ return errors.Errorf("image ID %q is invalid", id)
+ }
+ return nil
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ s1 := &Schema2V1Image{}
+ if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil {
+ return nil, err
+ }
+ return &types.ImageInspectInfo{
+ Tag: m.Tag,
+ Created: s1.Created,
+ DockerVersion: s1.DockerVersion,
+ Labels: make(map[string]string),
+ Architecture: s1.Architecture,
+ Os: s1.OS,
+ Layers: LayerInfosToStrings(m.LayerInfos()),
+ }, nil
+}
+
+// ToSchema2 builds a schema2-style configuration blob using the supplied diffIDs.
+func (m *Schema1) ToSchema2(diffIDs []digest.Digest) ([]byte, error) {
+ // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields
+ // that aren't directly comparable using info from the manifest.
+ if len(m.History) == 0 {
+ return nil, errors.New("image has no layers")
+ }
+ s2 := struct {
+ Schema2Image
+ ID string `json:"id,omitempty"`
+ Parent string `json:"parent,omitempty"`
+ ParentID string `json:"parent_id,omitempty"`
+ LayerID string `json:"layer_id,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+ Size int64 `json:",omitempty"`
+ }{}
+ config := []byte(m.History[0].V1Compatibility)
+ err := json.Unmarshal(config, &s2)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error decoding configuration")
+ }
+ // Images created with versions prior to 1.8.3 require us to re-encode the encoded object,
+ // adding some fields that aren't "omitempty".
+ if s2.DockerVersion != "" && versions.LessThan(s2.DockerVersion, "1.8.3") {
+ config, err = json.Marshal(&s2)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s2)
+ }
+ }
+ // Build the history.
+ convertedHistory := []Schema2History{}
+ for _, h := range m.History {
+ compat := Schema1V1Compatibility{}
+ if err := json.Unmarshal([]byte(h.V1Compatibility), &compat); err != nil {
+ return nil, errors.Wrapf(err, "error decoding history information")
+ }
+ hitem := Schema2History{
+ Created: compat.Created,
+ CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "),
+ Author: compat.Author,
+ Comment: compat.Comment,
+ EmptyLayer: compat.ThrowAway,
+ }
+ convertedHistory = append([]Schema2History{hitem}, convertedHistory...)
+ }
+ // Build the rootfs information. We need the decompressed sums that we've been
+ // calculating to fill in the DiffIDs. It's expected (but not enforced by us)
+ // that the number of diffIDs corresponds to the number of non-EmptyLayer
+ // entries in the history.
+ rootFS := &Schema2RootFS{
+ Type: "layers",
+ DiffIDs: diffIDs,
+ }
+ // And now for some raw manipulation.
+ raw := make(map[string]*json.RawMessage)
+ err = json.Unmarshal(config, &raw)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error re-decoding compat image config %#v: %v", s2)
+ }
+ // Drop some fields.
+ delete(raw, "id")
+ delete(raw, "parent")
+ delete(raw, "parent_id")
+ delete(raw, "layer_id")
+ delete(raw, "throwaway")
+ delete(raw, "Size")
+ // Add the history and rootfs information.
+ rootfs, err := json.Marshal(rootFS)
+ if err != nil {
+ return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
+ }
+ rawRootfs := json.RawMessage(rootfs)
+ raw["rootfs"] = &rawRootfs
+ history, err := json.Marshal(convertedHistory)
+ if err != nil {
+ return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err)
+ }
+ rawHistory := json.RawMessage(history)
+ raw["history"] = &rawHistory
+ // Encode the result.
+ config, err = json.Marshal(raw)
+ if err != nil {
+ return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s2, err)
+ }
+ return config, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) {
+ image, err := m.ToSchema2(diffIDs)
+ if err != nil {
+ return "", err
+ }
+ return digest.FromBytes(image).Hex(), nil
+}
diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go
new file mode 100644
index 000000000..ef82ffc24
--- /dev/null
+++ b/vendor/github.com/containers/image/manifest/docker_schema2.go
@@ -0,0 +1,251 @@
+package manifest
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/containers/image/pkg/strslice"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
+type Schema2Descriptor struct {
+ MediaType string `json:"mediaType"`
+ Size int64 `json:"size"`
+ Digest digest.Digest `json:"digest"`
+ URLs []string `json:"urls,omitempty"`
+}
+
+// Schema2 is a manifest in docker/distribution schema 2.
+type Schema2 struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ ConfigDescriptor Schema2Descriptor `json:"config"`
+ LayersDescriptors []Schema2Descriptor `json:"layers"`
+}
+
+// Schema2Port is a Port, a string containing port number and protocol in the
+// format "80/tcp", from docker/go-connections/nat.
+type Schema2Port string
+
+// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from
+// docker/go-connections/nat.
+type Schema2PortSet map[Schema2Port]struct{}
+
+// Schema2HealthConfig is a HealthConfig, which holds configuration settings
+// for the HEALTHCHECK feature, from docker/docker/api/types/container.
+type Schema2HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
+
+// Schema2Config is a Config in docker/docker/api/types/container.
+type Schema2Config struct {
+ Hostname string // Hostname
+ Domainname string // Domainname
+ User string // User that will run the command(s) inside the container; also supports user:group
+ AttachStdin bool // Attach the standard input, makes possible user interaction
+ AttachStdout bool // Attach the standard output
+ AttachStderr bool // Attach the standard error
+ ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports
+ Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
+ OpenStdin bool // Open stdin
+ StdinOnce bool // If true, close stdin after the first attached client disconnects.
+ Env []string // List of environment variable to set in the container
+ Cmd strslice.StrSlice // Command to run when starting the container
+ Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+ Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
+ Volumes map[string]struct{} // List of volumes (mounts) used for the container
+ WorkingDir string // Current directory (PWD) in which the command will be launched
+ Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
+ NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
+ Labels map[string]string // List of labels set to this container
+ StopSignal string `json:",omitempty"` // Signal to stop a container
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// Schema2V1Image is a V1Image in docker/docker/image.
+type Schema2V1Image struct {
+ // ID is a unique 64 character identifier of the image
+ ID string `json:"id,omitempty"`
+ // Parent is the ID of the parent image
+ Parent string `json:"parent,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Container is the id of the container used to commit
+ Container string `json:"container,omitempty"`
+ // ContainerConfig is the configuration of the container that is committed into the image
+ ContainerConfig Schema2Config `json:"container_config,omitempty"`
+ // DockerVersion specifies the version of Docker that was used to build the image
+ DockerVersion string `json:"docker_version,omitempty"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // Config is the configuration of the container received from the client
+ Config *Schema2Config `json:"config,omitempty"`
+ // Architecture is the hardware that the image is built and runs on
+ Architecture string `json:"architecture,omitempty"`
+ // OS is the operating system used to build and run the image
+ OS string `json:"os,omitempty"`
+ // Size is the total size of the image including all layers it is composed of
+ Size int64 `json:",omitempty"`
+}
+
+// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
+type Schema2RootFS struct {
+ Type string `json:"type"`
+ DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+}
+
+// Schema2History stores build commands that were used to create an image, from docker/docker/image.
+type Schema2History struct {
+ // Created is the timestamp at which the image was created
+ Created time.Time `json:"created"`
+ // Author is the name of the author that was specified when committing the image
+ Author string `json:"author,omitempty"`
+ // CreatedBy keeps the Dockerfile command used while building the image
+ CreatedBy string `json:"created_by,omitempty"`
+ // Comment is the commit message that was set when committing the image
+ Comment string `json:"comment,omitempty"`
+ // EmptyLayer is set to true if this history item did not generate a
+ // layer. Otherwise, the history item is associated with the next
+ // layer in the RootFS section.
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Schema2Image is an Image in docker/docker/image.
+type Schema2Image struct {
+ Schema2V1Image
+ Parent digest.Digest `json:"parent,omitempty"`
+ RootFS *Schema2RootFS `json:"rootfs,omitempty"`
+ History []Schema2History `json:"history,omitempty"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+
+ // rawJSON caches the immutable JSON associated with this image.
+ rawJSON []byte
+
+ // computedID is the ID computed from the hash of the image config.
+ // Not to be confused with the legacy V1 ID in V1Image.
+ computedID digest.Digest
+}
+
+// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
+func Schema2FromManifest(manifest []byte) (*Schema2, error) {
+ s2 := Schema2{}
+ if err := json.Unmarshal(manifest, &s2); err != nil {
+ return nil, err
+ }
+ return &s2, nil
+}
+
+// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
+ return &Schema2{
+ SchemaVersion: 2,
+ MediaType: DockerV2Schema2MediaType,
+ ConfigDescriptor: config,
+ LayersDescriptors: layers,
+ }
+}
+
+// Schema2Clone creates a copy of the supplied Schema2 manifest.
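+// (This is a shallow copy: the LayersDescriptors slice is still shared with src.)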
+func Schema2Clone(src *Schema2) *Schema2 {
+ copy := *src
+ return &copy
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *Schema2) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *Schema2) LayerInfos() []types.BlobInfo {
+ blobs := []types.BlobInfo{}
+ for _, layer := range m.LayersDescriptors {
+ blobs = append(blobs, types.BlobInfo{
+ Digest: layer.Digest,
+ Size: layer.Size,
+ URLs: layer.URLs,
+ })
+ }
+ return blobs
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ if len(m.LayersDescriptors) != len(layerInfos) {
+ return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
+ }
+ original := m.LayersDescriptors
+ m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
+ for i, info := range layerInfos {
+ m.LayersDescriptors[i].MediaType = original[i].MediaType
+ m.LayersDescriptors[i].Digest = info.Digest
+ m.LayersDescriptors[i].Size = info.Size
+ m.LayersDescriptors[i].URLs = info.URLs
+ }
+ return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *Schema2) Serialize() ([]byte, error) {
+ return json.Marshal(*m)
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ config, err := configGetter(m.ConfigInfo())
+ if err != nil {
+ return nil, err
+ }
+ s2 := &Schema2Image{}
+ if err := json.Unmarshal(config, s2); err != nil {
+ return nil, err
+ }
+ i := &types.ImageInspectInfo{
+ Tag: "",
+ Created: s2.Created,
+ DockerVersion: s2.DockerVersion,
+ Architecture: s2.Architecture,
+ Os: s2.OS,
+ Layers: LayerInfosToStrings(m.LayerInfos()),
+ }
+ if s2.Config != nil {
+ i.Labels = s2.Config.Labels
+ }
+ return i, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *Schema2) ImageID([]digest.Digest) (string, error) {
+ if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
+ return "", err
+ }
+ return m.ConfigDescriptor.Digest.Hex(), nil
+}
diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go
index 2e67763f3..2bc801d81 100644
--- a/vendor/github.com/containers/image/manifest/manifest.go
+++ b/vendor/github.com/containers/image/manifest/manifest.go
@@ -2,7 +2,9 @@ package manifest
import (
"encoding/json"
+ "fmt"
+ "github.com/containers/image/types"
"github.com/docker/libtrust"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -38,6 +40,39 @@ var DefaultRequestedManifestMIMETypes = []string{
DockerV2ListMediaType,
}
+// Manifest is an interface for parsing and modifying image manifests in isolation.
+// Callers can either use this abstract interface without understanding the details of the formats,
+// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members
+// directly.
+//
+// See types.Image for functionality not limited to manifests, including format conversions and config parsing.
+// This interface is similar to, but not strictly equivalent to, the corresponding methods in types.Image.
+type Manifest interface {
+ // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+ ConfigInfo() types.BlobInfo
+ // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+ // The Digest field is guaranteed to be provided; Size may be -1.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfos() []types.BlobInfo
+ // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+ UpdateLayerInfos(layerInfos []types.BlobInfo) error
+
+ // ImageID computes an ID which can uniquely identify this image by its contents, irrespective
+ // of which (of possibly more than one simultaneously valid) reference was used to locate the
+ // image, and unchanged by whether or how the layers are compressed. The result takes the form
+ // of the hexadecimal portion of a digest.Digest.
+ ImageID(diffIDs []digest.Digest) (string, error)
+
+ // Inspect returns various information for (skopeo inspect) parsed from the manifest,
+ // incorporating information from a configuration blob returned by configGetter, if
+ // the underlying image format is expected to include a configuration blob.
+ Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error)
+
+ // Serialize returns the manifest in a blob format.
+ // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+ Serialize() ([]byte, error)
+}
+
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
@@ -147,3 +182,57 @@ func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
func MIMETypeIsMultiImage(mimeType string) bool {
return mimeType == DockerV2ListMediaType
}
+
+// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
+// centralizing various workarounds.
+func NormalizedMIMEType(input string) string {
+ switch input {
+ // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
+ // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
+ // need to happen within the ImageSource.
+ case "application/json":
+ return DockerV2Schema1SignedMediaType
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
+ imgspecv1.MediaTypeImageManifest,
+ DockerV2Schema2MediaType,
+ DockerV2ListMediaType:
+ return input
+ default:
+ // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
+ // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
+ // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
+ //
+ // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
+ // This makes no real sense, but it happens because requests for manifests are
+ // redirected to a content distribution network which is configured that way.
+ // See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
+ return DockerV2Schema1SignedMediaType
+ }
+}
+
+// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
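+//
+// A sketch of typical use (hypothetical caller code; manblob and mt would
+// usually come from an ImageSource.GetManifest call):
+//
+//	m, err := FromBlob(manblob, mt)
+//	if err != nil {
+//		return err
+//	}
+//	for _, info := range m.LayerInfos() {
+//		fmt.Println(info.Digest, info.Size)
+//	}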
+func FromBlob(manblob []byte, mt string) (Manifest, error) {
+ switch NormalizedMIMEType(mt) {
+ case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
+ return Schema1FromManifest(manblob)
+ case imgspecv1.MediaTypeImageManifest:
+ return OCI1FromManifest(manblob)
+ case DockerV2Schema2MediaType:
+ return Schema2FromManifest(manblob)
+ case DockerV2ListMediaType:
+ return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
+ default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
+ }
+}
+
+// LayerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func LayerInfosToStrings(infos []types.BlobInfo) []string {
+ layers := make([]string, len(infos))
+ for i, info := range infos {
+ layers[i] = info.Digest.String()
+ }
+ return layers
+}
diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go
new file mode 100644
index 000000000..0ffb35b74
--- /dev/null
+++ b/vendor/github.com/containers/image/manifest/oci.go
@@ -0,0 +1,120 @@
+package manifest
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/image-spec/specs-go"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// OCI1 is a manifest.Manifest implementation for OCI images.
+// The underlying data from imgspecv1.Manifest is also available.
+type OCI1 struct {
+ imgspecv1.Manifest
+}
+
+// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
+func OCI1FromManifest(manifest []byte) (*OCI1, error) {
+ oci1 := OCI1{}
+ if err := json.Unmarshal(manifest, &oci1); err != nil {
+ return nil, err
+ }
+ return &oci1, nil
+}
+
+// OCI1FromComponents creates an OCI1 manifest instance from the supplied data.
+func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 {
+ return &OCI1{
+ imgspecv1.Manifest{
+ Versioned: specs.Versioned{SchemaVersion: 2},
+ Config: config,
+ Layers: layers,
+ },
+ }
+}
+
+// OCI1Clone creates a copy of the supplied OCI1 manifest.
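+// (This is a shallow copy: the Layers slice is still shared with src.)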
+func OCI1Clone(src *OCI1) *OCI1 {
+ return &OCI1{
+ Manifest: src.Manifest,
+ }
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+func (m *OCI1) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations}
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *OCI1) LayerInfos() []types.BlobInfo {
+ blobs := []types.BlobInfo{}
+ for _, layer := range m.Layers {
+ blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType})
+ }
+ return blobs
+}
+
+// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
+func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
+ if len(m.Layers) != len(layerInfos) {
+ return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
+ }
+ original := m.Layers
+ m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
+ for i, info := range layerInfos {
+ m.Layers[i].MediaType = original[i].MediaType
+ m.Layers[i].Digest = info.Digest
+ m.Layers[i].Size = info.Size
+ m.Layers[i].Annotations = info.Annotations
+ m.Layers[i].URLs = info.URLs
+ }
+ return nil
+}
+
+// Serialize returns the manifest in a blob format.
+// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
+func (m *OCI1) Serialize() ([]byte, error) {
+ return json.Marshal(*m)
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
+ config, err := configGetter(m.ConfigInfo())
+ if err != nil {
+ return nil, err
+ }
+ v1 := &imgspecv1.Image{}
+ if err := json.Unmarshal(config, v1); err != nil {
+ return nil, err
+ }
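+ // Also parse the config as a docker-style image config, best-effort, to pick
+ // up DockerVersion, which is not part of the OCI config; the Unmarshal error
+ // is deliberately ignored.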
+ d1 := &Schema2V1Image{}
+ json.Unmarshal(config, d1)
+ created := time.Time{}
+ if v1.Created != nil {
+ created = *v1.Created
+ }
+ i := &types.ImageInspectInfo{
+ Tag: "",
+ Created: created,
+ DockerVersion: d1.DockerVersion,
+ Labels: v1.Config.Labels,
+ Architecture: v1.Architecture,
+ Os: v1.OS,
+ Layers: LayerInfosToStrings(m.LayerInfos()),
+ }
+ return i, nil
+}
+
+// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *OCI1) ImageID([]digest.Digest) (string, error) {
+ if err := m.Config.Digest.Validate(); err != nil {
+ return "", err
+ }
+ return m.Config.Digest.Hex(), nil
+}
diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go
index 7d034a4f4..aee5d8d5b 100644
--- a/vendor/github.com/containers/image/oci/archive/oci_src.go
+++ b/vendor/github.com/containers/image/oci/archive/oci_src.go
@@ -88,3 +88,8 @@ func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int
func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
return s.unpackedSrc.GetSignatures(ctx, instanceDigest)
}
+
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *ociArchiveImageSource) LayerInfosForCopy() []types.BlobInfo {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go
index f0dac34e5..1109f65c6 100644
--- a/vendor/github.com/containers/image/oci/layout/oci_src.go
+++ b/vendor/github.com/containers/image/oci/layout/oci_src.go
@@ -143,6 +143,11 @@ func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, e
return nil, 0, errWrap
}
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *ociImageSource) LayerInfosForCopy() []types.BlobInfo {
+ return nil
+}
+
func getBlobSize(resp *http.Response) int64 {
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go
index 33d1a2bf2..546559144 100644
--- a/vendor/github.com/containers/image/openshift/openshift.go
+++ b/vendor/github.com/containers/image/openshift/openshift.go
@@ -246,6 +246,11 @@ func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest
return sigs, nil
}
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *openshiftImageSource) LayerInfosForCopy() []types.BlobInfo {
+ return nil
+}
+
// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
if s.docker != nil {
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go
index 704e1ecee..8154c9851 100644
--- a/vendor/github.com/containers/image/ostree/ostree_dest.go
+++ b/vendor/github.com/containers/image/ostree/ostree_dest.go
@@ -14,25 +14,30 @@ import (
"os/exec"
"path/filepath"
"strconv"
+ "syscall"
"time"
+ "unsafe"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/archive"
"github.com/opencontainers/go-digest"
+ selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
-// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
+// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include <stdlib.h>
// #include <ostree.h>
// #include <gio/ginputstream.h>
+// #include <selinux/selinux.h>
+// #include <selinux/label.h>
import "C"
type blobToImport struct {
@@ -150,7 +155,7 @@ func (d *ostreeImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}
-func fixFiles(dir string, usermode bool) error {
+func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
entries, err := ioutil.ReadDir(dir)
if err != nil {
return err
@@ -164,13 +169,40 @@ func fixFiles(dir string, usermode bool) error {
}
continue
}
+
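+ // If we have a label database, look up the default SELinux label for this
+ // path and apply it with lsetfilecon_raw(), so that the extracted files are
+ // labeled as if they had been created in place.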
+ if selinuxHnd != nil {
+ relPath, err := filepath.Rel(root, fullpath)
+ if err != nil {
+ return err
+ }
+ relPath = fmt.Sprintf("/%s", relPath)
+
+ relPathC := C.CString(relPath)
+ defer C.free(unsafe.Pointer(relPathC))
+ var context *C.char
+
+ res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm))
+ if int(res) < 0 && err != syscall.ENOENT {
+ return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath)
+ }
+ if int(res) == 0 {
+ defer C.freecon(context)
+ fullpathC := C.CString(fullpath)
+ defer C.free(unsafe.Pointer(fullpathC))
+ res, err = C.lsetfilecon_raw(fullpathC, context)
+ if int(res) < 0 {
+ return errors.Wrapf(err, "cannot setfilecon_raw %s", fullpath)
+ }
+ }
+ }
+
if info.IsDir() {
if usermode {
if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
return err
}
}
- err = fixFiles(fullpath, usermode)
+ err = fixFiles(selinuxHnd, root, fullpath, usermode)
if err != nil {
return err
}
@@ -223,7 +255,7 @@ func generateTarSplitMetadata(output *bytes.Buffer, file string) error {
return nil
}
-func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToImport) error {
+func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
if err := ensureDirectoryExists(destinationPath); err != nil {
@@ -243,7 +275,7 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm
if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
return err
}
- if err := fixFiles(destinationPath, false); err != nil {
+ if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil {
return err
}
} else {
@@ -252,7 +284,7 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm
return err
}
- if err := fixFiles(destinationPath, true); err != nil {
+ if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil {
return err
}
}
@@ -348,6 +380,17 @@ func (d *ostreeImageDestination) Commit() error {
return err
}
+ var selinuxHnd *C.struct_selabel_handle
+
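+ // When running as root on an SELinux-enabled system, load the file-contexts
+ // database so that importBlob()/fixFiles() can relabel the files extracted
+ // from each layer.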
+ if os.Getuid() == 0 && selinux.GetEnabled() {
+ selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
+ if selinuxHnd == nil {
+ return errors.Wrapf(err, "cannot open the SELinux DB")
+ }
+
+ defer C.selabel_close(selinuxHnd)
+ }
+
checkLayer := func(hash string) error {
blob := d.blobs[hash]
// if the blob is not present in d.blobs then it is already stored in OSTree,
@@ -355,7 +398,7 @@ func (d *ostreeImageDestination) Commit() error {
if blob == nil {
return nil
}
- err := d.importBlob(repo, blob)
+ err := d.importBlob(selinuxHnd, repo, blob)
if err != nil {
return err
}
diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go
index 3012da1af..c65a07b75 100644
--- a/vendor/github.com/containers/image/ostree/ostree_src.go
+++ b/vendor/github.com/containers/image/ostree/ostree_src.go
@@ -347,3 +347,8 @@ func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *d
}
return signatures, nil
}
+
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *ostreeImageSource) LayerInfosForCopy() []types.BlobInfo {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
index 89c0264b9..038195c16 100644
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ b/vendor/github.com/containers/image/storage/storage_image.go
@@ -6,11 +6,12 @@ import (
"bytes"
"context"
"encoding/json"
+ "fmt"
"io"
"io/ioutil"
- "time"
-
- "github.com/pkg/errors"
+ "os"
+ "path/filepath"
+ "sync/atomic"
"github.com/containers/image/image"
"github.com/containers/image/manifest"
@@ -18,10 +19,14 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
- ddigest "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
+const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+
var (
// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
// with a digest-based name that doesn't match its contents.
@@ -39,32 +44,25 @@ var (
type storageImageSource struct {
imageRef storageReference
- Tag string `json:"tag,omitempty"`
- Created time.Time `json:"created-time,omitempty"`
- ID string `json:"id"`
- BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle
- Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs
- LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers
- SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice
+ ID string
+ layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
+ cachedManifest []byte // A cached copy of the manifest, if already known, or nil
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
}
type storageImageDestination struct {
- imageRef storageReference
- Tag string `json:"tag,omitempty"`
- Created time.Time `json:"created-time,omitempty"`
- ID string `json:"id"`
- BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle
- Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs
- BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary
- Manifest []byte `json:"-"` // Manifest contents, temporary
- Signatures []byte `json:"-"` // Signature contents, temporary
- SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice
-}
-
-type storageLayerMetadata struct {
- Digest string `json:"digest,omitempty"`
- Size int64 `json:"size"`
- CompressedSize int64 `json:"compressed-size,omitempty"`
+ image types.ImageCloser
+ systemContext *types.SystemContext
+ imageRef storageReference // The reference we'll use to name the image
+ publicRef storageReference // The reference we return when asked about the name we'll give to the image
+ directory string // Temporary directory where we store blobs until Commit() time
+ nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
+ manifest []byte // Manifest contents, temporary
+ signatures []byte // Signature contents, temporary
+ blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
+ fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
+ filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
+ SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
}
type storageImageCloser struct {
@@ -72,223 +70,292 @@ type storageImageCloser struct {
size int64
}
-// newImageSource sets us up to read out an image, which needs to already exist.
+// newImageSource sets up an image for reading.
func newImageSource(imageRef storageReference) (*storageImageSource, error) {
+ // First, locate the image.
img, err := imageRef.resolveImage()
if err != nil {
return nil, err
}
+
+ // Build the reader object.
image := &storageImageSource{
imageRef: imageRef,
- Created: time.Now(),
ID: img.ID,
- BlobList: []types.BlobInfo{},
- Layers: make(map[ddigest.Digest][]string),
- LayerPosition: make(map[ddigest.Digest]int),
+ layerPosition: make(map[digest.Digest]int),
SignatureSizes: []int{},
}
- if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
- return nil, errors.Wrap(err, "error decoding metadata for source image")
+ if img.Metadata != "" {
+ if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
+ return nil, errors.Wrap(err, "error decoding metadata for source image")
+ }
}
return image, nil
}
-// newImageDestination sets us up to write a new image.
-func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
+// Reference returns the image reference that we used to find this image.
+func (s storageImageSource) Reference() types.ImageReference {
+ return s.imageRef
+}
+
+// Close cleans up any resources we tied up while reading the image.
+func (s storageImageSource) Close() error {
+ return nil
+}
+
+// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
+ rc, n, _, err = s.getBlobAndLayerID(info)
+ return rc, n, err
+}
+
+// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given.
+func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
+ var layer storage.Layer
+ var diffOptions *storage.DiffOptions
+ // We need a valid digest value.
+ err = info.Digest.Validate()
+ if err != nil {
+ return nil, -1, "", err
+ }
+ // Check if the blob corresponds to a diff that was used to initialize any layers. Our
+ // callers should try to retrieve layers using their uncompressed digests, so no need to
+ // check if they're using one of the compressed digests, which we can't reproduce anyway.
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest)
+ // If it's not a layer, then it must be a data item.
+ if len(layers) == 0 {
+ b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String())
+ if err != nil {
+ return nil, -1, "", err
+ }
+ r := bytes.NewReader(b)
+ logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
+ return ioutil.NopCloser(r), int64(r.Len()), "", nil
+ }
+ // Step through the list of matching layers. Tests may want to verify that if we have multiple layers
+ // which claim to have the same contents, we actually do have multiple layers; otherwise we could
+ // just go ahead and use the first one every time.
+ i := s.layerPosition[info.Digest]
+ s.layerPosition[info.Digest] = i + 1
+ if len(layers) > 0 {
+ layer = layers[i%len(layers)]
+ }
+ // Force the storage layer to not try to match any compression that was used when the layer was first
+ // handed to it.
+ noCompression := archive.Uncompressed
+ diffOptions = &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ if layer.UncompressedSize < 0 {
+ n = -1
+ } else {
+ n = layer.UncompressedSize
+ }
+ logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest)
+ rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
+ if err != nil {
+ return nil, -1, "", err
+ }
+ return rc, n, layer.ID, err
+}
+
+// GetManifest() reads the image's manifest.
+func (s *storageImageSource) GetManifest(instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
+ if instanceDigest != nil {
+ return nil, "", ErrNoManifestLists
+ }
+ if len(s.cachedManifest) == 0 {
+ // We stored the manifest as an item named after storage.ImageDigestBigDataKey.
+ cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, storage.ImageDigestBigDataKey)
+ if err != nil {
+ return nil, "", err
+ }
+ s.cachedManifest = cachedBlob
+ }
+ return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
+}
+
+// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
+// the image, after they've been decompressed.
+func (s *storageImageSource) LayerInfosForCopy() []types.BlobInfo {
+ simg, err := s.imageRef.transport.store.Image(s.ID)
+ if err != nil {
+ logrus.Errorf("error reading image %q: %v", s.ID, err)
+ return nil
+ }
+ updatedBlobInfos := []types.BlobInfo{}
+ layerID := simg.TopLayer
+ _, manifestType, err := s.GetManifest(nil)
+ if err != nil {
+ logrus.Errorf("error reading image manifest for %q: %v", s.ID, err)
+ return nil
+ }
+ uncompressedLayerType := ""
+ switch manifestType {
+ case imgspecv1.MediaTypeImageManifest:
+ uncompressedLayerType = imgspecv1.MediaTypeImageLayer
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
+ // This is actually a compressed type, but there's no uncompressed type defined
+ uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType
+ }
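+ // Walk the layer chain from the image's top layer down to the base,
+ // prepending as we go so that the result lists the root layer first.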
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ logrus.Errorf("error reading layer %q in image %q: %v", layerID, s.ID, err)
+ return nil
+ }
+ if layer.UncompressedDigest == "" {
+ logrus.Errorf("uncompressed digest for layer %q is unknown", layerID)
+ return nil
+ }
+ if layer.UncompressedSize < 0 {
+ logrus.Errorf("uncompressed size for layer %q is unknown", layerID)
+ return nil
+ }
+ blobInfo := types.BlobInfo{
+ Digest: layer.UncompressedDigest,
+ Size: layer.UncompressedSize,
+ MediaType: uncompressedLayerType,
+ }
+ updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...)
+ layerID = layer.Parent
+ }
+ return updatedBlobInfos
+}
+
+// GetSignatures() parses the image's signatures blob into a slice of byte slices.
+func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) {
+ if instanceDigest != nil {
+ return nil, ErrNoManifestLists
+ }
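+ // All signatures are stored concatenated in a single "signatures" big-data
+ // item; s.SignatureSizes records each signature's length so that the blob
+ // can be split back into individual signatures.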
+ var offset int
+ sigslice := [][]byte{}
+ signature := []byte{}
+ if len(s.SignatureSizes) > 0 {
+ signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures")
+ if err != nil {
+ return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.ID)
+ }
+ signature = signatureBlob
+ }
+ for _, length := range s.SignatureSizes {
+ sigslice = append(sigslice, signature[offset:offset+length])
+ offset += length
+ }
+ if offset != len(signature) {
+ return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset)
+ }
+ return sigslice, nil
+}
+
+// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
+// it's time to Commit() the image.
+func newImageDestination(ctx *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
+ directory, err := ioutil.TempDir(temporaryDirectoryForBigFiles, "storage")
+ if err != nil {
+ return nil, errors.Wrapf(err, "error creating a temporary directory")
+ }
+ // Break reading of the reference we're writing, so that copy.Image() won't try to rewrite
+ // schema1 image manifests to remove embedded references, since that changes the manifest's
+ // digest, and that makes the image unusable if we subsequently try to access it using a
+ // reference that mentions the no-longer-correct digest.
+ publicRef := imageRef
+ publicRef.name = nil
image := &storageImageDestination{
+ systemContext: ctx,
imageRef: imageRef,
- Tag: imageRef.reference,
- Created: time.Now(),
- ID: imageRef.id,
- BlobList: []types.BlobInfo{},
- Layers: make(map[ddigest.Digest][]string),
- BlobData: make(map[ddigest.Digest][]byte),
+ publicRef: publicRef,
+ directory: directory,
+ blobDiffIDs: make(map[digest.Digest]digest.Digest),
+ fileSizes: make(map[digest.Digest]int64),
+ filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
}
return image, nil
}
-func (s storageImageSource) Reference() types.ImageReference {
- return s.imageRef
-}
-
+// Reference returns a mostly-usable image reference that can't return a DockerReference, to
+// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to
+// remove image names that they contain which don't match the value we're using.
func (s storageImageDestination) Reference() types.ImageReference {
- return s.imageRef
+ return s.publicRef
}
-func (s storageImageSource) Close() error {
- return nil
-}
-
-func (s storageImageDestination) Close() error {
- return nil
+// Close cleans up the temporary directory.
+func (s *storageImageDestination) Close() error {
+ return os.RemoveAll(s.directory)
}
+// ShouldCompressLayers indicates whether or not a caller should compress not-already-compressed
+// data when handing it to us.
func (s storageImageDestination) ShouldCompressLayers() bool {
- // We ultimately have to decompress layers to populate trees on disk,
- // so callers shouldn't bother compressing them before handing them to
- // us, if they're not already compressed.
+ // We ultimately have to decompress layers to populate trees on disk, so callers shouldn't
+ // bother compressing them before handing them to us, if they're not already compressed.
return false
}
-// putBlob stores a layer or data blob, optionally enforcing that a digest in
-// blobinfo matches the incoming data.
-func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) {
- blobSize := blobinfo.Size
- digest := blobinfo.Digest
+// PutBlob stores a layer or data blob in our temporary directory, checking that any information
+// in the blobinfo matches the incoming data.
+func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
errorBlobInfo := types.BlobInfo{
Digest: "",
Size: -1,
}
- // Try to read an initial snippet of the blob.
- buf := [archive.HeaderSize]byte{}
- n, err := io.ReadAtLeast(stream, buf[:], len(buf))
- if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
- return errorBlobInfo, err
- }
- // Set up to read the whole blob (the initial snippet, plus the rest)
- // while digesting it with either the default, or the passed-in digest,
- // if one was specified.
- hasher := ddigest.Canonical.Digester()
- if digest.Validate() == nil {
- if a := digest.Algorithm(); a.Available() {
+ // Set up to digest the blob and count its size while saving it to a file.
+ hasher := digest.Canonical.Digester()
+ if blobinfo.Digest.Validate() == nil {
+ if a := blobinfo.Digest.Algorithm(); a.Available() {
hasher = a.Digester()
}
}
- hash := ""
+ diffID := digest.Canonical.Digester()
+ filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
+ file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
+ if err != nil {
+ return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename)
+ }
+ defer file.Close()
counter := ioutils.NewWriteCounter(hasher.Hash())
- defragmented := io.MultiReader(bytes.NewBuffer(buf[:n]), stream)
- multi := io.TeeReader(defragmented, counter)
- if (n > 0) && archive.IsArchive(buf[:n]) {
- // It's a filesystem layer. If it's not the first one in the
- // image, we assume that the most recently added layer is its
- // parent.
- parentLayer := ""
- for _, blob := range s.BlobList {
- if layerList, ok := s.Layers[blob.Digest]; ok {
- parentLayer = layerList[len(layerList)-1]
- }
- }
- // If we have an expected content digest, generate a layer ID
- // based on the parent's ID and the expected content digest.
- id := ""
- if digest.Validate() == nil {
- id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex()
- }
- // Attempt to create the identified layer and import its contents.
- layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi)
- if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
- logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err)
- return errorBlobInfo, err
- }
- if errors.Cause(err) == storage.ErrDuplicateID {
- // We specified an ID, and there's already a layer with
- // the same ID. Drain the input so that we can look at
- // its length and digest.
- _, err := io.Copy(ioutil.Discard, multi)
- if err != nil && err != io.EOF {
- logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, id, err)
- return errorBlobInfo, err
- }
- hash = hasher.Digest().String()
- } else {
- // Applied the layer with the specified ID. Note the
- // size info and computed digest.
- hash = hasher.Digest().String()
- layerMeta := storageLayerMetadata{
- Digest: hash,
- CompressedSize: counter.Count,
- Size: uncompressedSize,
- }
- if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil {
- s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata))
- }
- // Hang on to the new layer's ID.
- id = layer.ID
- }
- // Check if the size looks right.
- if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
- logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size)
- if layer != nil {
- // Something's wrong; delete the newly-created layer.
- s.imageRef.transport.store.DeleteLayer(layer.ID)
- }
- return errorBlobInfo, ErrBlobSizeMismatch
- }
- // If the content digest was specified, verify it.
- if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
- logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash)
- if layer != nil {
- // Something's wrong; delete the newly-created layer.
- s.imageRef.transport.store.DeleteLayer(layer.ID)
- }
- return errorBlobInfo, ErrBlobDigestMismatch
- }
- // If we didn't get a blob size, return the one we calculated.
- if blobSize == -1 {
- blobSize = counter.Count
- }
- // If we didn't get a digest, construct one.
- if digest == "" {
- digest = ddigest.Digest(hash)
- }
- // Record that this layer blob is a layer, and the layer ID it
- // ended up having. This is a list, in case the same blob is
- // being applied more than once.
- s.Layers[digest] = append(s.Layers[digest], id)
- s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count})
- if layer != nil {
- logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id)
- } else {
- logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id)
- }
- } else {
- // It's just data. Finish scanning it in, check that our
- // computed digest matches the passed-in digest, and store it,
- // but leave it out of the blob-to-layer-ID map so that we can
- // tell that it's not a layer.
- blob, err := ioutil.ReadAll(multi)
- if err != nil && err != io.EOF {
- return errorBlobInfo, err
- }
- hash = hasher.Digest().String()
- if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size {
- logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size)
- return errorBlobInfo, ErrBlobSizeMismatch
- }
- // If we were given a digest, verify that the content matches
- // it.
- if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
- logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
- return errorBlobInfo, ErrBlobDigestMismatch
- }
- // If we didn't get a blob size, return the one we calculated.
- if blobSize == -1 {
- blobSize = int64(len(blob))
- }
- // If we didn't get a digest, construct one.
- if digest == "" {
- digest = ddigest.Digest(hash)
- }
- // Save the blob for when we Commit().
- s.BlobData[digest] = blob
- s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))})
- logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest)
+ reader := io.TeeReader(io.TeeReader(stream, counter), file)
+ decompressed, err := archive.DecompressStream(reader)
+ if err != nil {
+ return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob")
+ }
+ // Drain the stream, digesting the decompressed data; the tees above write the raw data to the file.
+ _, err = io.Copy(diffID.Hash(), decompressed)
+ decompressed.Close()
+ if err != nil {
+ return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename)
+ }
+ // Ensure that any information that we were given about the blob is correct.
+ if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() {
+ return errorBlobInfo, ErrBlobDigestMismatch
+ }
+ if blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
+ return errorBlobInfo, ErrBlobSizeMismatch
+ }
+ // Record information about the blob.
+ s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
+ s.fileSizes[hasher.Digest()] = counter.Count
+ s.filenames[hasher.Digest()] = filename
+ blobDigest := blobinfo.Digest
+ if blobDigest.Validate() != nil {
+ blobDigest = hasher.Digest()
+ }
+ blobSize := blobinfo.Size
+ if blobSize < 0 {
+ blobSize = counter.Count
}
return types.BlobInfo{
- Digest: digest,
- Size: blobSize,
+ Digest: blobDigest,
+ Size: blobSize,
+ MediaType: blobinfo.MediaType,
}, nil
}
-// PutBlob is used to both store filesystem layers and binary data that is part
-// of the image. Filesystem layers are assumed to be imported in order, as
-// that is required by some of the underlying storage drivers.
-func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
- return s.putBlob(stream, blobinfo, true)
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be
+// reapplied using ReapplyBlob.
+//
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
@@ -296,93 +363,289 @@ func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64,
if blobinfo.Digest == "" {
return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
}
- for _, blob := range s.BlobList {
- if blob.Digest == blobinfo.Digest {
- return true, blob.Size, nil
- }
+ if err := blobinfo.Digest.Validate(); err != nil {
+ return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
+ }
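+ // A blob can be present in three ways: cached in a temporary file by a
+ // previous PutBlob(), or already in storage as a layer recorded under
+ // either its uncompressed or its compressed digest.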
+ // Check if we've already cached it in a file.
+ if size, ok := s.fileSizes[blobinfo.Digest]; ok {
+ return true, size, nil
+ }
+ // Check if storage already holds a layer whose uncompressed digest matches that blob.
+ layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
+ if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+ return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
+ }
+ if len(layers) > 0 {
+ // Save this for completeness.
+ s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+ return true, layers[0].UncompressedSize, nil
}
+ // Check if storage already holds a layer whose compressed digest matches that blob.
+ layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
+ if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
+ return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
+ }
+ if len(layers) > 0 {
+ // Record the uncompressed value so that we can use it to calculate layer IDs.
+ s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
+ return true, layers[0].CompressedSize, nil
+ }
+ // Nope, we don't have it.
return false, -1, nil
}
+// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the
+// same one when it walks the list in the manifest.
func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) {
- err := blobinfo.Digest.Validate()
+ present, size, err := s.HasBlob(blobinfo)
if err != nil {
- return types.BlobInfo{}, err
+ return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo)
}
+ if !present {
+ return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo)
+ }
- if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 {
- b, err := s.imageRef.transport.store.ImageBigData(s.ID, blobinfo.Digest.String())
- if err != nil {
- return types.BlobInfo{}, err
+ blobinfo.Size = size
+ return blobinfo, nil
+}
+
+// computeID computes a recommended image ID based on information we have so far. If
+// the manifest is not of a type that we recognize, we return an empty value, indicating
+// that since we don't have a recommendation, a random ID should be used if one needs
+// to be allocated.
+func (s *storageImageDestination) computeID(m manifest.Manifest) string {
+ // Build the diffID list. We need the decompressed sums that we've been calculating to
+ // fill in the DiffIDs. It's expected (but not enforced by us) that the number of
+ // diffIDs corresponds to the number of non-EmptyLayer entries in the history.
+ var diffIDs []digest.Digest
+ switch s1 := m.(type) {
+ case *manifest.Schema1:
+ // Build a list of the diffIDs we've generated for the non-throwaway FS layers,
+ // in reverse of the order in which they were originally listed.
+ for i, history := range s1.History {
+ compat := manifest.Schema1V1Compatibility{}
+ if err := json.Unmarshal([]byte(history.V1Compatibility), &compat); err != nil {
+ logrus.Debugf("internal error reading schema 1 history: %v", err)
+ return ""
+ }
+ if compat.ThrowAway {
+ continue
+ }
+ blobSum := s1.FSLayers[i].BlobSum
+ diffID, ok := s.blobDiffIDs[blobSum]
+ if !ok {
+ logrus.Infof("error looking up diffID for layer %q", blobSum.String())
+ return ""
+ }
+ diffIDs = append([]digest.Digest{diffID}, diffIDs...)
}
- return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil
+ case *manifest.Schema2, *manifest.OCI1:
+ // We know the ID calculation for these formats doesn't actually use the diffIDs,
+ // so we don't need to populate the diffID list.
}
- layerList := s.Layers[blobinfo.Digest]
- rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1])
+ id, err := m.ImageID(diffIDs)
if err != nil {
- return types.BlobInfo{}, err
+ return ""
+ }
+ return id
+}
+
+// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
+// information out of it for Inspect().
+func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) {
+ if info.Digest == "" {
+ return nil, errors.Errorf(`no digest supplied when reading blob`)
+ }
+ if err := info.Digest.Validate(); err != nil {
+ return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`)
+ }
+ // Assume it's a file, since we're only calling this from a place that expects to read files.
+ if filename, ok := s.filenames[info.Digest]; ok {
+ contents, err2 := ioutil.ReadFile(filename)
+ if err2 != nil {
+ return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename)
+ }
+ return contents, nil
}
- return s.putBlob(rc, blobinfo, false)
+ // If it's not a file, it's a bug, because we're not expecting to be asked for a layer.
+ return nil, errors.New("blob not found")
}
func (s *storageImageDestination) Commit() error {
- // Create the image record.
+ // Find the list of layer blobs.
+ if len(s.manifest) == 0 {
+ return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
+ }
+ man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
+ if err != nil {
+ return errors.Wrapf(err, "error parsing manifest")
+ }
+ layerBlobs := man.LayerInfos()
+ // Extract or find the layers.
lastLayer := ""
- for _, blob := range s.BlobList {
- if layerList, ok := s.Layers[blob.Digest]; ok {
- lastLayer = layerList[len(layerList)-1]
+ addedLayers := []string{}
+ for _, blob := range layerBlobs {
+ var diff io.ReadCloser
+ // Check if there's already a layer with the ID that we'd give to the result of applying
+ // this layer blob to its parent, if it has one, or the blob's hex value otherwise.
+ diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
+ if !haveDiffID {
+ // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
+ // or to even check if we had it.
+ logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
+ has, _, err := s.HasBlob(blob)
+ if err != nil {
+ return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
+ }
+ if !has {
+ return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
+ }
+ diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
+ if !haveDiffID {
+ return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
+ }
}
- }
- img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil)
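+ // Compute the layer's ID deterministically by chaining the parent layer's ID
+ // with this layer's uncompressed digest, so that importing the same stack of
+ // blobs always produces the same layer IDs.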
+ id := diffID.Hex()
+ if lastLayer != "" {
+ id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
+ }
+ if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
+ // There's already a layer that should have the right contents, just reuse it.
+ lastLayer = layer.ID
+ continue
+ }
+ // Check if we cached a file with that blobsum. If we didn't already have a layer with
+ // the blob's contents, we should have gotten a copy.
+ if filename, ok := s.filenames[blob.Digest]; ok {
+ // Use the file's contents to initialize the layer.
+ file, err2 := os.Open(filename)
+ if err2 != nil {
+ return errors.Wrapf(err2, "error opening file %q", filename)
+ }
+ defer file.Close()
+ diff = file
+ }
+ if diff == nil {
+ // Try to find a layer with contents matching that blobsum.
+ layer := ""
+ layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest)
+ if err2 == nil && len(layers) > 0 {
+ layer = layers[0].ID
+ } else {
+ layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
+ if err2 == nil && len(layers) > 0 {
+ layer = layers[0].ID
+ }
+ }
+ if layer == "" {
+ return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
+ }
+ // Use the layer's contents to initialize the new layer.
+ noCompression := archive.Uncompressed
+ diffOptions := &storage.DiffOptions{
+ Compression: &noCompression,
+ }
+ diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions)
+ if err2 != nil {
+ return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
+ }
+ defer diff.Close()
+ }
+ if diff == nil {
+ // This shouldn't have happened.
+ return errors.Errorf("error applying blob %q: content not found", blob.Digest)
+ }
+ // Build the new layer using the diff, regardless of where it came from.
+ layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, diff)
+ if err != nil {
+ return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
+ }
+ lastLayer = layer.ID
+ addedLayers = append([]string{lastLayer}, addedLayers...)
+ }
+ // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
+ // was originally created, in case we're just copying it. If not, no harm done.
+ options := &storage.ImageOptions{}
+ if inspect, err := man.Inspect(s.getConfigBlob); err == nil {
+ logrus.Debugf("setting image creation date to %s", inspect.Created)
+ options.CreationDate = inspect.Created
+ }
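+ // Record the manifest's digest on the image so that the image can later be
+ // located by that digest.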
+ if manifestDigest, err := manifest.Digest(s.manifest); err == nil {
+ options.Digest = manifestDigest
+ }
+ // Create the image record, pointing to the most-recently added layer.
+ intendedID := s.imageRef.id
+ if intendedID == "" {
+ intendedID = s.computeID(man)
+ }
+ oldNames := []string{}
+ img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
if err != nil {
if errors.Cause(err) != storage.ErrDuplicateID {
logrus.Debugf("error creating image: %q", err)
- return errors.Wrapf(err, "error creating image %q", s.ID)
+ return errors.Wrapf(err, "error creating image %q", intendedID)
}
- img, err = s.imageRef.transport.store.Image(s.ID)
+ img, err = s.imageRef.transport.store.Image(intendedID)
if err != nil {
- return errors.Wrapf(err, "error reading image %q", s.ID)
+ return errors.Wrapf(err, "error reading image %q", intendedID)
}
if img.TopLayer != lastLayer {
- logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", s.ID)
- return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", s.ID)
+ logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID)
+ return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID)
}
logrus.Debugf("reusing image ID %q", img.ID)
+ oldNames = append(oldNames, img.Names...)
} else {
logrus.Debugf("created new image ID %q", img.ID)
}
- s.ID = img.ID
- names := img.Names
- if s.Tag != "" {
- names = append(names, s.Tag)
+ // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so
+ // we just need to screen out the ones that are actually layers to get the list of non-layers.
+ dataBlobs := make(map[digest.Digest]struct{})
+ for blob := range s.filenames {
+ dataBlobs[blob] = struct{}{}
}
- // We have names to set, so move those names to this image.
- if len(names) > 0 {
- if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil {
+ for _, layerBlob := range layerBlobs {
+ delete(dataBlobs, layerBlob.Digest)
+ }
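+ // Whatever remains in dataBlobs is a non-layer blob (e.g. the image
+ // configuration); store each one as a big-data item on the image.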
+ for blob := range dataBlobs {
+ v, err := ioutil.ReadFile(s.filenames[blob])
+ if err != nil {
+ return errors.Wrapf(err, "error copying non-layer blob %q to image", blob)
+ }
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
- logrus.Debugf("error setting names on image %q: %v", img.ID, err)
- return err
+ logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err)
+ return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID)
}
- logrus.Debugf("set names of image %q to %v", img.ID, names)
}
- // Save the data blobs to disk, and drop their contents from memory.
- keys := []ddigest.Digest{}
- for k, v := range s.BlobData {
- if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil {
+ // Set the reference's name on the image.
+ if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil {
+ names := []string{}
+ if name != nil {
+ names = append(names, verboseName(name))
+ }
+ if len(oldNames) > 0 {
+ names = append(names, oldNames...)
+ }
+ if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
- logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err)
- return err
+ logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err)
+ return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID)
}
- keys = append(keys, k)
- }
- for _, key := range keys {
- delete(s.BlobData, key)
+ logrus.Debugf("set names of image %q to %v", img.ID, names)
}
- // Save the manifest, if we have one.
- if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil {
+ // Save the manifest. Use storage.ImageDigestBigDataKey as the item's
+ // name, so that its digest can be used to locate the image in the Store.
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
@@ -390,12 +653,14 @@ func (s *storageImageDestination) Commit() error {
return err
}
// Save the signatures, if we have any.
- if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil {
- if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
- logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ if len(s.signatures) > 0 {
+ if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil {
+ if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+ logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+ }
+ logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
+ return err
}
- logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
- return err
}
// Save our metadata.
metadata, err := json.Marshal(s)
@@ -407,7 +672,7 @@ func (s *storageImageDestination) Commit() error {
return err
}
if len(metadata) != 0 {
- if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil {
+ if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
@@ -420,7 +685,7 @@ func (s *storageImageDestination) Commit() error {
}
var manifestMIMETypes = []string{
- // TODO(runcom): we'll add OCI as part of another PR here
+ imgspecv1.MediaTypeImageManifest,
manifest.DockerV2Schema2MediaType,
manifest.DockerV2Schema1SignedMediaType,
manifest.DockerV2Schema1MediaType,
@@ -430,23 +695,20 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
return manifestMIMETypes
}
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
-// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
+// PutManifest writes the manifest to the destination.
func (s *storageImageDestination) PutManifest(manifest []byte) error {
- s.Manifest = make([]byte, len(manifest))
- copy(s.Manifest, manifest)
+ s.manifest = make([]byte, len(manifest))
+ copy(s.manifest, manifest)
return nil
}
-// SupportsSignatures returns an error if we can't expect GetSignatures() to
-// return data that was previously supplied to PutSignatures().
+// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was
+// previously supplied to PutSignatures().
func (s *storageImageDestination) SupportsSignatures() error {
return nil
}
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
// uploaded to the image destination, true otherwise.
func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
return false
@@ -457,6 +719,7 @@ func (s *storageImageDestination) MustMatchRuntimeOS() bool {
return true
}
+// PutSignatures records the image's signatures for committing as a single data blob.
func (s *storageImageDestination) PutSignatures(signatures [][]byte) error {
sizes := []int{}
sigblob := []byte{}
@@ -467,150 +730,67 @@ func (s *storageImageDestination) PutSignatures(signatures [][]byte) error {
copy(newblob[len(sigblob):], sig)
sigblob = newblob
}
- s.Signatures = sigblob
+ s.signatures = sigblob
s.SignatureSizes = sizes
return nil
}
-func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
- rc, n, _, err = s.getBlobAndLayerID(info)
- return rc, n, err
-}
-
-func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
- err = info.Digest.Validate()
- if err != nil {
- return nil, -1, "", err
- }
- if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 {
- b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String())
- if err != nil {
- return nil, -1, "", err
- }
- r := bytes.NewReader(b)
- logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
- return ioutil.NopCloser(r), int64(r.Len()), "", nil
- }
- // If the blob was "put" more than once, we have multiple layer IDs
- // which should all produce the same diff. For the sake of tests that
- // want to make sure we created different layers each time the blob was
- // "put", though, cycle through the layers.
- layerList := s.Layers[info.Digest]
- position, ok := s.LayerPosition[info.Digest]
- if !ok {
- position = 0
- }
- s.LayerPosition[info.Digest] = (position + 1) % len(layerList)
- logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest)
- rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position])
- return rc, n, layerList[position], err
-}
-
-func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) {
- layer, err := store.Layer(layerID)
- if err != nil {
- return nil, -1, err
- }
- layerMeta := storageLayerMetadata{
- CompressedSize: -1,
- }
- if layer.Metadata != "" {
- if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
- return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
- }
- }
- if layerMeta.CompressedSize <= 0 {
- n = -1
- } else {
- n = layerMeta.CompressedSize
- }
- diff, err := store.Diff("", layer.ID, nil)
- if err != nil {
- return nil, -1, err
- }
- return diff, n, nil
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
-// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
-func (s *storageImageSource) GetManifest(instanceDigest *ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) {
- if instanceDigest != nil {
- return nil, "", ErrNoManifestLists
- }
- manifestBlob, err = s.imageRef.transport.store.ImageBigData(s.ID, "manifest")
- return manifestBlob, manifest.GuessMIMEType(manifestBlob), err
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *ddigest.Digest) ([][]byte, error) {
- if instanceDigest != nil {
- return nil, ErrNoManifestLists
- }
- var offset int
- signature, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures")
- if err != nil {
- return nil, err
- }
- sigslice := [][]byte{}
- for _, length := range s.SignatureSizes {
- sigslice = append(sigslice, signature[offset:offset+length])
- offset += length
- }
- if offset != len(signature) {
- return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
- }
- return sigslice, nil
-}
-
+// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
func (s *storageImageSource) getSize() (int64, error) {
var sum int64
- names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id)
+ // Size up the data blobs.
+ dataNames, err := s.imageRef.transport.store.ListImageBigData(s.ID)
if err != nil {
- return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id)
+ return -1, errors.Wrapf(err, "error reading image %q", s.ID)
}
- for _, name := range names {
- bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.imageRef.id, name)
+ for _, dataName := range dataNames {
+ bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.ID, dataName)
if err != nil {
- return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id)
+ return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.ID)
}
sum += bigSize
}
+ // Add the signature sizes.
for _, sigSize := range s.SignatureSizes {
sum += int64(sigSize)
}
- for _, layerList := range s.Layers {
- for _, layerID := range layerList {
- layer, err := s.imageRef.transport.store.Layer(layerID)
- if err != nil {
- return -1, err
- }
- layerMeta := storageLayerMetadata{
- Size: -1,
- }
- if layer.Metadata != "" {
- if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
- return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
- }
- }
- if layerMeta.Size < 0 {
- return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
- }
- sum += layerMeta.Size
+ // Prepare to walk the layer list.
+ img, err := s.imageRef.transport.store.Image(s.ID)
+ if err != nil {
+ return -1, errors.Wrapf(err, "error reading image info %q", s.ID)
+ }
+ // Walk the layer list.
+ layerID := img.TopLayer
+ for layerID != "" {
+ layer, err := s.imageRef.transport.store.Layer(layerID)
+ if err != nil {
+ return -1, err
+ }
+ if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
+ return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
}
+ sum += layer.UncompressedSize
+ if layer.Parent == "" {
+ break
+ }
+ layerID = layer.Parent
}
return sum, nil
}
+// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the
+// signatures, and the uncompressed sizes of all of the image's layers.
+func (s *storageImageSource) Size() (int64, error) {
+ return s.getSize()
+}
+
+// Size() returns the previously-computed size of the image, with no error.
func (s *storageImageCloser) Size() (int64, error) {
return s.size, nil
}
-// newImage creates an ImageCloser that also knows its size
+// newImage creates an image that also knows its size.
func newImage(ctx *types.SystemContext, s storageReference) (types.ImageCloser, error) {
src, err := newImageSource(s)
if err != nil {
diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go
index efad95ce6..bcb00f60e 100644
--- a/vendor/github.com/containers/image/storage/storage_reference.go
+++ b/vendor/github.com/containers/image/storage/storage_reference.go
@@ -8,6 +8,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/containers/storage"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -20,9 +21,11 @@ type storageReference struct {
reference string
id string
name reference.Named
+ tag string
+ digest digest.Digest
}
-func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference {
+func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference {
// We take a copy of the transport, which contains a pointer to the
// store that it used for resolving this reference, so that the
// transport that we'll return from Transport() won't be affected by
@@ -32,6 +35,8 @@ func newReference(transport storageTransport, reference, id string, name referen
reference: reference,
id: id,
name: name,
+ tag: tag,
+ digest: digest,
}
}
@@ -39,25 +44,49 @@ func newReference(transport storageTransport, reference, id string, name referen
// one present with the same name or ID, and return the image.
func (s *storageReference) resolveImage() (*storage.Image, error) {
if s.id == "" {
+ // Look for an image that has the expanded reference name as an explicit Name value.
image, err := s.transport.store.Image(s.reference)
if image != nil && err == nil {
s.id = image.ID
}
}
+ if s.id == "" && s.name != nil && s.digest != "" {
+ // Look for an image with the specified digest that has the same name,
+ // though possibly with a different tag or digest, as a Name value, so
+ // that the canonical reference can be implicitly resolved to the image.
+ images, err := s.transport.store.ImagesByDigest(s.digest)
+ if images != nil && err == nil {
+ repo := reference.FamiliarName(reference.TrimNamed(s.name))
+ search:
+ for _, image := range images {
+ for _, name := range image.Names {
+ if named, err := reference.ParseNormalizedNamed(name); err == nil {
+ if reference.FamiliarName(reference.TrimNamed(named)) == repo {
+ s.id = image.ID
+ break search
+ }
+ }
+ }
+ }
+ }
+ }
if s.id == "" {
- logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport())
- return nil, ErrNoSuchImage
+ logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
+ return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport())
}
img, err := s.transport.store.Image(s.id)
if err != nil {
return nil, errors.Wrapf(err, "error reading image %q", s.id)
}
- if s.reference != "" {
+ if s.name != nil {
+ repo := reference.FamiliarName(reference.TrimNamed(s.name))
nameMatch := false
for _, name := range img.Names {
- if name == s.reference {
- nameMatch = true
- break
+ if named, err := reference.ParseNormalizedNamed(name); err == nil {
+ if reference.FamiliarName(reference.TrimNamed(named)) == repo {
+ nameMatch = true
+ break
+ }
}
}
if !nameMatch {
@@ -78,8 +107,21 @@ func (s storageReference) Transport() types.ImageTransport {
}
}
-// Return a name with a tag, if we have a name to base them on.
+// Return a name with a tag or digest, if we have either, else return it bare.
func (s storageReference) DockerReference() reference.Named {
+ if s.name == nil {
+ return nil
+ }
+ if s.tag != "" {
+ if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil {
+ return namedTagged
+ }
+ }
+ if s.digest != "" {
+ if canonical, err := reference.WithDigest(s.name, s.digest); err == nil {
+ return canonical
+ }
+ }
return s.name
}
@@ -93,7 +135,7 @@ func (s storageReference) StringWithinTransport() string {
optionsList = ":" + strings.Join(options, ",")
}
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
- if s.name == nil {
+ if s.reference == "" {
return storeSpec + "@" + s.id
}
if s.id == "" {
@@ -122,11 +164,8 @@ func (s storageReference) PolicyConfigurationNamespaces() []string {
driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
namespaces := []string{}
if s.name != nil {
- if s.id != "" {
- // The reference without the ID is also a valid namespace.
- namespaces = append(namespaces, storeSpec+s.reference)
- }
- components := strings.Split(s.name.Name(), "/")
+ name := reference.TrimNamed(s.name)
+ components := strings.Split(name.String(), "/")
for len(components) > 0 {
namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
components = components[:len(components)-1]
@@ -166,5 +205,5 @@ func (s storageReference) NewImageSource(ctx *types.SystemContext) (types.ImageS
}
func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
- return newImageDestination(s)
+ return newImageDestination(ctx, s)
}
diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go
index df4578a8d..f6ebcdc4a 100644
--- a/vendor/github.com/containers/image/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/storage/storage_transport.go
@@ -13,11 +13,14 @@ import (
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
- "github.com/opencontainers/go-digest"
- ddigest "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
+const (
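+ // minimumTruncatedIDLength is the shortest prefix we will treat as a
+ // possibly-truncated image ID rather than as a name.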
+ minimumTruncatedIDLength = 3
+)
+
func init() {
transports.Register(Transport)
}
@@ -103,60 +106,124 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
var name reference.Named
- var sum digest.Digest
- var err error
if ref == "" {
- return nil, ErrInvalidReference
+ return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref)
}
if ref[0] == '[' {
// Ignore the store specifier.
closeIndex := strings.IndexRune(ref, ']')
if closeIndex < 1 {
- return nil, ErrInvalidReference
+ return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref)
}
ref = ref[closeIndex+1:]
}
- refInfo := strings.SplitN(ref, "@", 2)
- if len(refInfo) == 1 {
- // A name.
- name, err = reference.ParseNormalizedNamed(refInfo[0])
- if err != nil {
- return nil, err
+
+ // The last segment, if there's more than one, is either a digest from a reference, or an image ID.
+ split := strings.LastIndex(ref, "@")
+ idOrDigest := ""
+ if split != -1 {
+ // Peel off that last bit so that we can work on the rest.
+ idOrDigest = ref[split+1:]
+ if idOrDigest == "" {
+ return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
}
- } else if len(refInfo) == 2 {
- // An ID, possibly preceded by a name.
- if refInfo[0] != "" {
- name, err = reference.ParseNormalizedNamed(refInfo[0])
- if err != nil {
- return nil, err
- }
+ ref = ref[:split]
+ }
+
+ // The middle segment (now the last segment), if there is one, is a digest.
+ split = strings.LastIndex(ref, "@")
+ sum := digest.Digest("")
+ if split != -1 {
+ sum = digest.Digest(ref[split+1:])
+ if sum == "" {
+ return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
}
- sum, err = digest.Parse(refInfo[1])
- if err != nil || sum.Validate() != nil {
- sum, err = digest.Parse("sha256:" + refInfo[1])
- if err != nil || sum.Validate() != nil {
- return nil, err
- }
+ ref = ref[:split]
+ }
+
+ // If we have something that unambiguously should be a digest, validate it, and then the third part,
+ // if we have one, as an ID.
+ id := ""
+ if sum != "" {
+ if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil {
+ return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest)
+ }
+ if err := sum.Validate(); err != nil {
+ return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
+ }
+ id = idOrDigest
+ if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) {
+ // The ID is a truncated version of the ID of an image that's present in local storage,
+ // so we might as well use the expanded value.
+ id = img.ID
+ }
+ } else if idOrDigest != "" {
+ // There was no middle portion, so the final portion could be either a digest or an ID.
+ if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil {
+ // It's an ID.
+ id = idOrDigest
+ } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil {
+ // It's a digest.
+ sum = idSum
+ } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) {
+ // It's a truncated version of the ID of an image that's present in local storage,
+ // and we may need the expanded value.
+ id = img.ID
+ } else {
+ return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
+ }
+ }
+
+ // If we only had one portion, then _maybe_ it's a truncated image ID. Only check for that if it's
+ // at least as long as what we guess is a reasonable minimum, because we don't want a really short value
+ // like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
+ if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" {
+ if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) {
+ // It's a truncated version of the ID of an image that's present in local storage;
+ // we need to expand it.
+ id = img.ID
+ ref = ""
}
- } else { // Coverage: len(refInfo) is always 1 or 2
- // Anything else: store specified in a form we don't
- // recognize.
- return nil, ErrInvalidReference
}
+
+ // The initial portion is probably a name, possibly with a tag.
+ if ref != "" {
+ var err error
+ if name, err = reference.ParseNormalizedNamed(ref); err != nil {
+ return nil, errors.Wrapf(err, "error parsing named reference %q", ref)
+ }
+ }
+ if name == nil && sum == "" && id == "" {
+ return nil, errors.Errorf("error parsing reference")
+ }
+
+ // Construct a copy of the store spec.
optionsList := ""
options := store.GraphOptions()
if len(options) > 0 {
optionsList = ":" + strings.Join(options, ",")
}
storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]"
- id := ""
- if sum.Validate() == nil {
- id = sum.Hex()
- }
+
+ // Convert the name back into a reference string, if we got a name.
refname := ""
+ tag := ""
if name != nil {
- name = reference.TagNameOnly(name)
- refname = verboseName(name)
+ if sum.Validate() == nil {
+ canonical, err := reference.WithDigest(name, sum)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum)
+ }
+ refname = verboseName(canonical)
+ } else {
+ name = reference.TagNameOnly(name)
+ tagged, ok := name.(reference.Tagged)
+ if !ok {
+ return nil, errors.Errorf("error parsing possibly-tagless name %q", ref)
+ }
+ refname = verboseName(name)
+ tag = tagged.Tag()
+ }
}
if refname == "" {
logrus.Debugf("parsed reference to id into %q", storeSpec+"@"+id)
@@ -165,7 +232,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
} else {
logrus.Debugf("parsed reference to refname@id into %q", storeSpec+refname+"@"+id)
}
- return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil
+ return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil
}
func (s *storageTransport) GetStore() (storage.Store, error) {
@@ -184,11 +251,14 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
return s.store, nil
}
-// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"),
+// ParseReference takes a name and a tag or digest and/or ID
+// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"),
// possibly prefixed with a store specifier in the form "[_graphroot_]" or
// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
// tries to figure out which it is, and returns it in a reference object.
+// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
+// even be specified as if it were a _name_ value.
func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
var store storage.Store
// Check if there's a store location prefix. If there is, then it
@@ -267,17 +337,23 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
dref := ref.DockerReference()
- if dref == nil {
- if sref, ok := ref.(*storageReference); ok {
- if sref.id != "" {
- if img, err := store.Image(sref.id); err == nil {
- return img, nil
- }
+ if dref != nil {
+ if img, err := store.Image(verboseName(dref)); err == nil {
+ return img, nil
+ }
+ }
+ if sref, ok := ref.(*storageReference); ok {
+ if sref.id != "" {
+ if img, err := store.Image(sref.id); err == nil {
+ return img, nil
}
}
- return nil, ErrInvalidReference
+ tmpRef := *sref
+ if img, err := tmpRef.resolveImage(); err == nil {
+ return img, nil
+ }
}
- return store.Image(verboseName(dref))
+ return nil, storage.ErrImageUnknown
}
func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
@@ -337,7 +413,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
if err != nil {
return err
}
- _, err = ddigest.Parse("sha256:" + scopeInfo[1])
+ _, err = digest.Parse("sha256:" + scopeInfo[1])
if err != nil {
return err
}
@@ -347,11 +423,28 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
return nil
}
-func verboseName(name reference.Named) string {
- name = reference.TagNameOnly(name)
+func verboseName(r reference.Reference) string {
+ if r == nil {
+ return ""
+ }
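+ // The result has the form "name[:tag][@digest]", using whichever components
+ // the reference actually carries.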
+ named, isNamed := r.(reference.Named)
+ digested, isDigested := r.(reference.Digested)
+ tagged, isTagged := r.(reference.Tagged)
+ name := ""
tag := ""
- if tagged, ok := name.(reference.NamedTagged); ok {
- tag = ":" + tagged.Tag()
+ sum := ""
+ if isNamed {
+ name = (reference.TrimNamed(named)).String()
+ }
+ if isTagged {
+ if tagged.Tag() != "" {
+ tag = ":" + tagged.Tag()
+ }
+ }
+ if isDigested {
+ if digested.Digest().Validate() == nil {
+ sum = "@" + digested.Digest().String()
+ }
}
- return name.Name() + tag
+ return name + tag + sum
}
diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go
index 872a446a1..8b5b496da 100644
--- a/vendor/github.com/containers/image/tarball/tarball_src.go
+++ b/vendor/github.com/containers/image/tarball/tarball_src.go
@@ -254,7 +254,7 @@ func (is *tarballImageSource) Reference() types.ImageReference {
return &is.reference
}
-// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
-func (*tarballImageSource) UpdatedLayerInfos() []types.BlobInfo {
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (*tarballImageSource) LayerInfosForCopy() []types.BlobInfo {
return nil
}
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
index 176887480..2e9c7105d 100644
--- a/vendor/github.com/containers/image/types/types.go
+++ b/vendor/github.com/containers/image/types/types.go
@@ -126,6 +126,10 @@ type ImageSource interface {
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error)
+ // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
+ // The Digest field is guaranteed to be provided; Size may be -1.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfosForCopy() []BlobInfo
}
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
@@ -211,6 +215,10 @@ type UnparsedImage interface {
Manifest() ([]byte, string, error)
// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
Signatures(ctx context.Context) ([][]byte, error)
+ // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
+ // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfosForCopy() []BlobInfo
}
// Image is the primary API for inspecting properties of images.
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
index 20b3f7e1a..f3634b38e 100644
--- a/vendor/github.com/containers/image/vendor.conf
+++ b/vendor/github.com/containers/image/vendor.conf
@@ -1,5 +1,5 @@
github.com/sirupsen/logrus v1.0.0
-github.com/containers/storage 47536c89fcc545a87745e1a1573addc439409165
+github.com/containers/storage master
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
@@ -36,4 +36,5 @@ github.com/tchap/go-patricia v2.2.6
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
-github.com/gogo/protobuf/proto fcdc5011193ff531a548e9b0301828d5a5b97fd8
+github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
+github.com/pquerna/ffjson master
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
index ae601f431..d224406e7 100644
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
@@ -227,20 +227,20 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
key = strings.ToLower(key)
switch key {
- case "overlay.override_kernel_check", "overlay2.override_kernel_check":
+ case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check":
logrus.Debugf("overlay: override_kernelcheck=%s", val)
o.overrideKernelCheck, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
- case "overlay.size", "overlay2.size":
+ case ".size", "overlay.size", "overlay2.size":
logrus.Debugf("overlay: size=%s", val)
size, err := units.RAMInBytes(val)
if err != nil {
return nil, err
}
o.quota.Size = uint64(size)
- case "overlay.imagestore", "overlay2.imagestore":
+ case ".imagestore", "overlay.imagestore", "overlay2.imagestore":
logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths
for _, store := range strings.Split(val, ",") {
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go
index cf8eca914..ae62207d1 100644
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go
@@ -36,6 +36,11 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
for _, option := range options {
if strings.HasPrefix(option, "vfs.imagestore=") {
d.homes = append(d.homes, strings.Split(option[15:], ",")...)
+ continue
+ }
+ if strings.HasPrefix(option, ".imagestore=") {
+ d.homes = append(d.homes, strings.Split(option[12:], ",")...)
+ continue
}
}
return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go
index ed22e131f..962e1bb76 100644
--- a/vendor/github.com/containers/storage/images.go
+++ b/vendor/github.com/containers/storage/images.go
@@ -14,12 +14,22 @@ import (
"github.com/pkg/errors"
)
+const (
+ // ImageDigestBigDataKey is the name of the big data item whose
+ // contents we consider useful for computing a "digest" of the
+ // image, by which we can locate the image later.
+ ImageDigestBigDataKey = "manifest"
+)
+
// An Image is a reference to a layer and an associated metadata string.
type Image struct {
// ID is either one which was specified at create-time, or a random
// value which was generated by the library.
ID string `json:"id"`
+ // Digest is a digest value that we can use to locate the image.
+ Digest digest.Digest `json:"digest,omitempty"`
+
// Names is an optional set of user-defined convenience values. The
// image can be referred to by its ID or any of its names. Names are
// unique among images.
@@ -28,7 +38,7 @@ type Image struct {
// TopLayer is the ID of the topmost layer of the image itself, if the
// image contains one or more layers. Multiple images can refer to the
// same top layer.
- TopLayer string `json:"layer"`
+ TopLayer string `json:"layer,omitempty"`
// Metadata is data we keep for the convenience of the caller. It is not
// expected to be large, since it is kept in memory.
@@ -74,6 +84,10 @@ type ROImageStore interface {
// Images returns a slice enumerating the known images.
Images() ([]Image, error)
+
+ // ByDigest returns a slice enumerating the images which have a big data
+ // item with the name ImageDigestBigDataKey and the specified digest.
+ ByDigest(d digest.Digest) ([]*Image, error)
}
// ImageStore provides bookkeeping for information about Images.
@@ -87,7 +101,7 @@ type ImageStore interface {
// Create creates an image that has a specified ID (or a random one) and
// optional names, using the specified layer as its topmost (hopefully
// read-only) layer. That layer can be referenced by multiple images.
- Create(id string, names []string, layer, metadata string, created time.Time) (*Image, error)
+ Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)
// SetNames replaces the list of names associated with an image with the
// supplied values.
@@ -107,6 +121,7 @@ type imageStore struct {
idindex *truncindex.TruncIndex
byid map[string]*Image
byname map[string]*Image
+ bydigest map[digest.Digest][]*Image
}
func (r *imageStore) Images() ([]Image, error) {
@@ -140,6 +155,7 @@ func (r *imageStore) Load() error {
idlist := []string{}
ids := make(map[string]*Image)
names := make(map[string]*Image)
+ digests := make(map[digest.Digest][]*Image)
if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil {
idlist = make([]string, 0, len(images))
for n, image := range images {
@@ -152,6 +168,16 @@ func (r *imageStore) Load() error {
}
names[name] = images[n]
}
+ // Implicit digest
+ if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
+ digests[digest] = append(digests[digest], images[n])
+ }
+ // Explicit digest
+ if image.Digest == "" {
+ image.Digest = image.BigDataDigests[ImageDigestBigDataKey]
+ } else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] {
+ digests[image.Digest] = append(digests[image.Digest], images[n])
+ }
}
}
if shouldSave && !r.IsReadWrite() {
@@ -161,6 +187,7 @@ func (r *imageStore) Load() error {
r.idindex = truncindex.NewTruncIndex(idlist)
r.byid = ids
r.byname = names
+ r.bydigest = digests
if shouldSave {
return r.Save()
}
@@ -199,6 +226,7 @@ func newImageStore(dir string) (ImageStore, error) {
images: []*Image{},
byid: make(map[string]*Image),
byname: make(map[string]*Image),
+ bydigest: make(map[digest.Digest][]*Image),
}
if err := istore.Load(); err != nil {
return nil, err
@@ -219,6 +247,7 @@ func newROImageStore(dir string) (ROImageStore, error) {
images: []*Image{},
byid: make(map[string]*Image),
byname: make(map[string]*Image),
+ bydigest: make(map[digest.Digest][]*Image),
}
if err := istore.Load(); err != nil {
return nil, err
@@ -265,7 +294,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
return r.Save()
}
-func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time) (image *Image, err error) {
+func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) {
if !r.IsReadWrite() {
return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath())
}
@@ -292,6 +321,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
if err == nil {
image = &Image{
ID: id,
+ Digest: searchableDigest,
Names: names,
TopLayer: layer,
Metadata: metadata,
@@ -304,6 +334,10 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
+ if searchableDigest != "" {
+ list := r.bydigest[searchableDigest]
+ r.bydigest[searchableDigest] = append(list, image)
+ }
for _, name := range names {
r.byname[name] = image
}
@@ -383,6 +417,28 @@ func (r *imageStore) Delete(id string) error {
r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
}
}
+ if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
+ // remove the image from the digest-based index
+ if list, ok := r.bydigest[digest]; ok {
+ prunedList := imageSliceWithoutValue(list, image)
+ if len(prunedList) == 0 {
+ delete(r.bydigest, digest)
+ } else {
+ r.bydigest[digest] = prunedList
+ }
+ }
+ }
+ if image.Digest != "" {
+ // remove the image's hard-coded digest from the digest-based index
+ if list, ok := r.bydigest[image.Digest]; ok {
+ prunedList := imageSliceWithoutValue(list, image)
+ if len(prunedList) == 0 {
+ delete(r.bydigest, image.Digest)
+ } else {
+ r.bydigest[image.Digest] = prunedList
+ }
+ }
+ }
if err := r.Save(); err != nil {
return err
}
@@ -411,6 +467,13 @@ func (r *imageStore) Exists(id string) bool {
return ok
}
+func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
+ if images, ok := r.bydigest[d]; ok {
+ return images, nil
+ }
+ return nil, ErrImageUnknown
+}
+
func (r *imageStore) BigData(id, key string) ([]byte, error) {
if key == "" {
return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name")
@@ -486,6 +549,17 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
return image.BigDataNames, nil
}
+func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
+ modified := make([]*Image, 0, len(slice))
+ for _, v := range slice {
+ if v == value {
+ continue
+ }
+ modified = append(modified, v)
+ }
+ return modified
+}
+
func (r *imageStore) SetBigData(id, key string, data []byte) error {
if key == "" {
return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item")
@@ -528,6 +602,29 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
image.BigDataNames = append(image.BigDataNames, key)
save = true
}
+ if key == ImageDigestBigDataKey {
+ if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest {
+ // remove the image from the list of images in the digest-based
+ // index which corresponds to the old digest for this item, unless
+ // it's also the hard-coded digest
+ if list, ok := r.bydigest[oldDigest]; ok {
+ prunedList := imageSliceWithoutValue(list, image)
+ if len(prunedList) == 0 {
+ delete(r.bydigest, oldDigest)
+ } else {
+ r.bydigest[oldDigest] = prunedList
+ }
+ }
+ }
+ // add the image to the list of images in the digest-based index which
+ // corresponds to the new digest for this item, unless it's already there
+ list := r.bydigest[newDigest]
+ if len(list) == len(imageSliceWithoutValue(list, image)) {
+ // the list isn't shortened by trying to prune this image from it,
+ // so it's not in there yet
+ r.bydigest[newDigest] = append(list, image)
+ }
+ }
if save {
err = r.Save()
}
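A rough sketch of the contract these hunks implement, assuming an already-opened read-write image store; store setup and locking are elided, and the image name is illustrative. An image ends up in the by-digest index both via the searchable digest passed to Create and via the digest of the data stored under ImageDigestBigDataKey:

package storageexample

import (
	"fmt"
	"time"

	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// indexExample registers an image in the digest index twice: once through
// the searchable digest given to Create, once through the manifest big-data
// item, then looks it up again with ByDigest.
func indexExample(istore storage.ImageStore, manifest []byte) error {
	dgst := digest.FromBytes(manifest)

	// An empty id asks the store to generate a random one.
	img, err := istore.Create("", []string{"example/image:latest"}, "", "", time.Now(), dgst)
	if err != nil {
		return err
	}

	// Storing data under ImageDigestBigDataKey also (re)indexes the image by
	// the digest of the stored bytes, pruning any stale entry for an old digest.
	if err := istore.SetBigData(img.ID, storage.ImageDigestBigDataKey, manifest); err != nil {
		return err
	}

	matches, err := istore.ByDigest(dgst)
	if err != nil {
		return err
	}
	fmt.Printf("%d image(s) indexed under %s\n", len(matches), dgst)
	return nil
}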
diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go
index fd3ef11a6..f6a8b0650 100644
--- a/vendor/github.com/containers/storage/images_ffjson.go
+++ b/vendor/github.com/containers/storage/images_ffjson.go
@@ -38,6 +38,11 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
buf.WriteString(`{ "id":`)
fflib.WriteJsonString(buf, string(j.ID))
buf.WriteByte(',')
+ if len(j.Digest) != 0 {
+ buf.WriteString(`"digest":`)
+ fflib.WriteJsonString(buf, string(j.Digest))
+ buf.WriteByte(',')
+ }
if len(j.Names) != 0 {
buf.WriteString(`"names":`)
if j.Names != nil {
@@ -54,9 +59,11 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
}
buf.WriteByte(',')
}
- buf.WriteString(`"layer":`)
- fflib.WriteJsonString(buf, string(j.TopLayer))
- buf.WriteByte(',')
+ if len(j.TopLayer) != 0 {
+ buf.WriteString(`"layer":`)
+ fflib.WriteJsonString(buf, string(j.TopLayer))
+ buf.WriteByte(',')
+ }
if len(j.Metadata) != 0 {
buf.WriteString(`"metadata":`)
fflib.WriteJsonString(buf, string(j.Metadata))
@@ -144,6 +151,8 @@ const (
ffjtImageID
+ ffjtImageDigest
+
ffjtImageNames
ffjtImageTopLayer
@@ -163,6 +172,8 @@ const (
var ffjKeyImageID = []byte("id")
+var ffjKeyImageDigest = []byte("digest")
+
var ffjKeyImageNames = []byte("names")
var ffjKeyImageTopLayer = []byte("layer")
@@ -266,6 +277,14 @@ mainparse:
goto mainparse
}
+ case 'd':
+
+ if bytes.Equal(ffjKeyImageDigest, kn) {
+ currentKey = ffjtImageDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
case 'f':
if bytes.Equal(ffjKeyImageFlags, kn) {
@@ -356,6 +375,12 @@ mainparse:
goto mainparse
}
+ if fflib.EqualFoldRight(ffjKeyImageDigest, kn) {
+ currentKey = ffjtImageDigest
+ state = fflib.FFParse_want_colon
+ goto mainparse
+ }
+
if fflib.SimpleLetterEqualFold(ffjKeyImageID, kn) {
currentKey = ffjtImageID
state = fflib.FFParse_want_colon
@@ -382,6 +407,9 @@ mainparse:
case ffjtImageID:
goto handle_ID
+ case ffjtImageDigest:
+ goto handle_Digest
+
case ffjtImageNames:
goto handle_Names
@@ -446,6 +474,32 @@ handle_ID:
state = fflib.FFParse_after_value
goto mainparse
+handle_Digest:
+
+ /* handler: j.Digest type=digest.Digest kind=string quoted=false*/
+
+ {
+
+ {
+ if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
+ return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
+ }
+ }
+
+ if tok == fflib.FFTok_null {
+
+ } else {
+
+ outBuf := fs.Output.Bytes()
+
+ j.Digest = digest.Digest(string(outBuf))
+
+ }
+ }
+
+ state = fflib.FFParse_after_value
+ goto mainparse
+
handle_Names:
/* handler: j.Names type=[]string kind=slice quoted=false*/
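For orientation, a small round-trip sketch of the serialization the generated marshaller above implements; the field values are illustrative, and encoding/json produces equivalent output via the struct tags:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	img := storage.Image{
		ID:     "abc123",
		Digest: digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
	}
	b, err := json.Marshal(img)
	if err != nil {
		panic(err)
	}
	// The new "digest" key appears, and the empty TopLayer is now omitted
	// thanks to the added omitempty tag.
	fmt.Println(string(b))
}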
diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/containers/storage/pkg/archive/example_changes.go
new file mode 100644
index 000000000..70f9c5564
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/archive/example_changes.go
@@ -0,0 +1,97 @@
+// +build ignore
+
+// Simple tool to create an archive stream from an old and new directory
+//
+// By default it will stream the comparison of two temporary directories with junk files
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/containers/storage/pkg/archive"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ flDebug = flag.Bool("D", false, "debugging output")
+ flNewDir = flag.String("newdir", "", "")
+ flOldDir = flag.String("olddir", "", "")
+ log = logrus.New()
+)
+
+func main() {
+ flag.Usage = func() {
+ fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
+ fmt.Printf("%s [OPTIONS]\n", os.Args[0])
+ flag.PrintDefaults()
+ }
+ flag.Parse()
+ log.Out = os.Stderr
+ if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
+ logrus.SetLevel(logrus.DebugLevel)
+ }
+ var newDir, oldDir string
+
+ if len(*flNewDir) == 0 {
+ var err error
+ newDir, err = ioutil.TempDir("", "storage-test-newDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(newDir)
+ if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
+ log.Fatal(err)
+ }
+ } else {
+ newDir = *flNewDir
+ }
+
+ if len(*flOldDir) == 0 {
+ var err error
+ oldDir, err = ioutil.TempDir("", "storage-test-oldDir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.RemoveAll(oldDir)
+ } else {
+ oldDir = *flOldDir
+ }
+
+ changes, err := archive.ChangesDirs(newDir, oldDir)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ a, err := archive.ExportChanges(newDir, changes)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer a.Close()
+
+ i, err := io.Copy(os.Stdout, a)
+ if err != nil && err != io.EOF {
+ log.Fatal(err)
+ }
+ fmt.Fprintf(os.Stderr, "wrote archive of %d bytes\n", i)
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
index f7e3dc347..de6054327 100644
--- a/vendor/github.com/containers/storage/store.go
+++ b/vendor/github.com/containers/storage/store.go
@@ -370,6 +370,10 @@ type Store interface {
// and may have different metadata, big data items, and flags.
ImagesByTopLayer(id string) ([]*Image, error)
+ // ImagesByDigest returns a list of images which contain a big data item
+ // named ImageDigestBigDataKey whose contents have the specified digest.
+ ImagesByDigest(d digest.Digest) ([]*Image, error)
+
// Container returns a specific container.
Container(id string) (*Container, error)
@@ -430,6 +434,8 @@ type ImageOptions struct {
// CreationDate, if not zero, will override the default behavior of marking the image as having been
// created when CreateImage() was called, recording CreationDate instead.
CreationDate time.Time
+ // Digest is a hard-coded digest value that we can use to look up the image. It is optional.
+ Digest digest.Digest
}
// ContainerOptions is used for passing options to a Store's CreateContainer() method.
@@ -487,11 +493,6 @@ func GetStore(options StoreOptions) (Store, error) {
if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
- for _, subdir := range []string{} {
- if err := os.MkdirAll(filepath.Join(options.RunRoot, subdir), 0700); err != nil && !os.IsExist(err) {
- return nil, err
- }
- }
if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
@@ -834,11 +835,11 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
}
creationDate := time.Now().UTC()
- if options != nil {
+ if options != nil && !options.CreationDate.IsZero() {
creationDate = options.CreationDate
}
- return ristore.Create(id, names, layer, metadata, creationDate)
+ var searchableDigest digest.Digest
+ if options != nil {
+ searchableDigest = options.Digest
+ }
+ return ristore.Create(id, names, layer, metadata, creationDate, searchableDigest)
}
func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
@@ -2086,6 +2087,33 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
return images, nil
}
+func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
+ images := []*Image{}
+
+ istore, err := s.ImageStore()
+ if err != nil {
+ return nil, err
+ }
+
+ istores, err := s.ROImageStores()
+ if err != nil {
+ return nil, err
+ }
+ for _, store := range append([]ROImageStore{istore}, istores...) {
+ store.Lock()
+ defer store.Unlock()
+ if modified, err := store.Modified(); modified || err != nil {
+ store.Load()
+ }
+ imageList, err := store.ByDigest(d)
+ if err != nil && err != ErrImageUnknown {
+ return nil, err
+ }
+ images = append(images, imageList...)
+ }
+ return images, nil
+}
+
func (s *store) Container(id string) (*Container, error) {
rcstore, err := s.ContainerStore()
if err != nil {
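An end-to-end sketch of the caller-facing API added in this file, assuming the library's default store options; the empty layer ID and the image name are placeholders, and error handling is minimal:

package main

import (
	"fmt"

	"github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	store, err := storage.GetStore(storage.DefaultStoreOptions)
	if err != nil {
		panic(err)
	}
	manifest := []byte(`{"schemaVersion": 2}`)
	d := digest.FromBytes(manifest)

	// ImageOptions.Digest records a hard-coded, searchable digest at create time.
	if _, err := store.CreateImage("", []string{"example/img:latest"}, "", "", &storage.ImageOptions{Digest: d}); err != nil {
		panic(err)
	}

	// ImagesByDigest consults the read-write store and any read-only stores.
	images, err := store.ImagesByDigest(d)
	if err != nil {
		panic(err)
	}
	for _, img := range images {
		fmt.Println(img.ID, img.Digest)
	}
}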
diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf
index 9958101e2..a30f8feb5 100644
--- a/vendor/github.com/containers/storage/vendor.conf
+++ b/vendor/github.com/containers/storage/vendor.conf
@@ -15,7 +15,7 @@ github.com/pmezard/go-difflib v1.0.0
github.com/sirupsen/logrus v1.0.0
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/tchap/go-patricia v2.2.6
-github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
+github.com/vbatts/tar-split v0.10.2
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
deleted file mode 100644
index ba6d41d85..000000000
--- a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2014 Docker, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-// Code forked from Docker project
-package daemon
-
-import (
- "net"
- "os"
-)
-
-// SdNotify sends a message to the init daemon. It is common to ignore the error.
-// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET`
-// will be unconditionally unset.
-//
-// It returns one of the following:
-// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset)
-// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data)
-// (true, nil) - notification supported, data has been sent
-func SdNotify(unsetEnvironment bool, state string) (sent bool, err error) {
- socketAddr := &net.UnixAddr{
- Name: os.Getenv("NOTIFY_SOCKET"),
- Net: "unixgram",
- }
-
- // NOTIFY_SOCKET not set
- if socketAddr.Name == "" {
- return false, nil
- }
-
- if unsetEnvironment {
- err = os.Unsetenv("NOTIFY_SOCKET")
- }
- if err != nil {
- return false, err
- }
-
- conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
- // Error connecting to NOTIFY_SOCKET
- if err != nil {
- return false, err
- }
- defer conn.Close()
-
- _, err = conn.Write([]byte(state))
- // Error sending the message
- if err != nil {
- return false, err
- }
- return true, nil
-}
diff --git a/vendor/github.com/coreos/go-systemd/daemon/watchdog.go b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go
deleted file mode 100644
index 35a92e6e6..000000000
--- a/vendor/github.com/coreos/go-systemd/daemon/watchdog.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2016 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package daemon
-
-import (
- "fmt"
- "os"
- "strconv"
- "time"
-)
-
-// SdWatchdogEnabled return watchdog information for a service.
-// Process should send daemon.SdNotify("WATCHDOG=1") every time / 2.
-// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC`
-// and `WATCHDOG_PID` will be unconditionally unset.
-//
-// It returns one of the following:
-// (0, nil) - watchdog isn't enabled or we aren't the watched PID.
-// (0, err) - an error happened (e.g. error converting time).
-// (time, nil) - watchdog is enabled and we can send ping.
-// time is delay before inactive service will be killed.
-func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) {
- wusec := os.Getenv("WATCHDOG_USEC")
- wpid := os.Getenv("WATCHDOG_PID")
- if unsetEnvironment {
- wusecErr := os.Unsetenv("WATCHDOG_USEC")
- wpidErr := os.Unsetenv("WATCHDOG_PID")
- if wusecErr != nil {
- return 0, wusecErr
- }
- if wpidErr != nil {
- return 0, wpidErr
- }
- }
-
- if wusec == "" {
- return 0, nil
- }
- s, err := strconv.Atoi(wusec)
- if err != nil {
- return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err)
- }
- if s <= 0 {
- return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number")
- }
- interval := time.Duration(s) * time.Microsecond
-
- if wpid == "" {
- return interval, nil
- }
- p, err := strconv.Atoi(wpid)
- if err != nil {
- return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err)
- }
- if os.Getpid() != p {
- return 0, nil
- }
-
- return interval, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
deleted file mode 100644
index 3d737b3e1..000000000
--- a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package ioutils
-
-import (
- "errors"
- "io"
-)
-
-var errBufferFull = errors.New("buffer is full")
-
-type fixedBuffer struct {
- buf []byte
- pos int
- lastRead int
-}
-
-func (b *fixedBuffer) Write(p []byte) (int, error) {
- n := copy(b.buf[b.pos:cap(b.buf)], p)
- b.pos += n
-
- if n < len(p) {
- if b.pos == cap(b.buf) {
- return n, errBufferFull
- }
- return n, io.ErrShortWrite
- }
- return n, nil
-}
-
-func (b *fixedBuffer) Read(p []byte) (int, error) {
- n := copy(p, b.buf[b.lastRead:b.pos])
- b.lastRead += n
- return n, nil
-}
-
-func (b *fixedBuffer) Len() int {
- return b.pos - b.lastRead
-}
-
-func (b *fixedBuffer) Cap() int {
- return cap(b.buf)
-}
-
-func (b *fixedBuffer) Reset() {
- b.pos = 0
- b.lastRead = 0
- b.buf = b.buf[:0]
-}
-
-func (b *fixedBuffer) String() string {
- return string(b.buf[b.lastRead:b.pos])
-}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
deleted file mode 100644
index 72a04f349..000000000
--- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package ioutils
-
-import (
- "errors"
- "io"
- "sync"
-)
-
-// maxCap is the highest capacity to use in byte slices that buffer data.
-const maxCap = 1e6
-
-// minCap is the lowest capacity to use in byte slices that buffer data
-const minCap = 64
-
-// blockThreshold is the minimum number of bytes in the buffer which will cause
-// a write to BytesPipe to block when allocating a new slice.
-const blockThreshold = 1e6
-
-var (
- // ErrClosed is returned when Write is called on a closed BytesPipe.
- ErrClosed = errors.New("write to closed BytesPipe")
-
- bufPools = make(map[int]*sync.Pool)
- bufPoolsLock sync.Mutex
-)
-
-// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
-// All written data may be read at most once. Also, BytesPipe allocates
-// and releases new byte slices to adjust to current needs, so the buffer
-// won't be overgrown after peak loads.
-type BytesPipe struct {
- mu sync.Mutex
- wait *sync.Cond
- buf []*fixedBuffer
- bufLen int
- closeErr error // error to return from next Read. set to nil if not closed.
-}
-
-// NewBytesPipe creates new BytesPipe, initialized by specified slice.
-// If buf is nil, then it will be initialized with slice which cap is 64.
-// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
-func NewBytesPipe() *BytesPipe {
- bp := &BytesPipe{}
- bp.buf = append(bp.buf, getBuffer(minCap))
- bp.wait = sync.NewCond(&bp.mu)
- return bp
-}
-
-// Write writes p to BytesPipe.
-// It can allocate new []byte slices in a process of writing.
-func (bp *BytesPipe) Write(p []byte) (int, error) {
- bp.mu.Lock()
-
- written := 0
-loop0:
- for {
- if bp.closeErr != nil {
- bp.mu.Unlock()
- return written, ErrClosed
- }
-
- if len(bp.buf) == 0 {
- bp.buf = append(bp.buf, getBuffer(64))
- }
- // get the last buffer
- b := bp.buf[len(bp.buf)-1]
-
- n, err := b.Write(p)
- written += n
- bp.bufLen += n
-
- // errBufferFull is an error we expect to get if the buffer is full
- if err != nil && err != errBufferFull {
- bp.wait.Broadcast()
- bp.mu.Unlock()
- return written, err
- }
-
- // if there was enough room to write all then break
- if len(p) == n {
- break
- }
-
- // more data: write to the next slice
- p = p[n:]
-
- // make sure the buffer doesn't grow too big from this write
- for bp.bufLen >= blockThreshold {
- bp.wait.Wait()
- if bp.closeErr != nil {
- continue loop0
- }
- }
-
- // add new byte slice to the buffers slice and continue writing
- nextCap := b.Cap() * 2
- if nextCap > maxCap {
- nextCap = maxCap
- }
- bp.buf = append(bp.buf, getBuffer(nextCap))
- }
- bp.wait.Broadcast()
- bp.mu.Unlock()
- return written, nil
-}
-
-// CloseWithError causes further reads from a BytesPipe to return immediately.
-func (bp *BytesPipe) CloseWithError(err error) error {
- bp.mu.Lock()
- if err != nil {
- bp.closeErr = err
- } else {
- bp.closeErr = io.EOF
- }
- bp.wait.Broadcast()
- bp.mu.Unlock()
- return nil
-}
-
-// Close causes further reads from a BytesPipe to return immediately.
-func (bp *BytesPipe) Close() error {
- return bp.CloseWithError(nil)
-}
-
-// Read reads bytes from BytesPipe.
-// Data could be read only once.
-func (bp *BytesPipe) Read(p []byte) (n int, err error) {
- bp.mu.Lock()
- if bp.bufLen == 0 {
- if bp.closeErr != nil {
- bp.mu.Unlock()
- return 0, bp.closeErr
- }
- bp.wait.Wait()
- if bp.bufLen == 0 && bp.closeErr != nil {
- err := bp.closeErr
- bp.mu.Unlock()
- return 0, err
- }
- }
-
- for bp.bufLen > 0 {
- b := bp.buf[0]
- read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
- n += read
- bp.bufLen -= read
-
- if b.Len() == 0 {
- // it's empty so return it to the pool and move to the next one
- returnBuffer(b)
- bp.buf[0] = nil
- bp.buf = bp.buf[1:]
- }
-
- if len(p) == read {
- break
- }
-
- p = p[read:]
- }
-
- bp.wait.Broadcast()
- bp.mu.Unlock()
- return
-}
-
-func returnBuffer(b *fixedBuffer) {
- b.Reset()
- bufPoolsLock.Lock()
- pool := bufPools[b.Cap()]
- bufPoolsLock.Unlock()
- if pool != nil {
- pool.Put(b)
- }
-}
-
-func getBuffer(size int) *fixedBuffer {
- bufPoolsLock.Lock()
- pool, ok := bufPools[size]
- if !ok {
- pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
- bufPools[size] = pool
- }
- bufPoolsLock.Unlock()
- return pool.Get().(*fixedBuffer)
-}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
deleted file mode 100644
index a56c46265..000000000
--- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package ioutils
-
-import (
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
-)
-
-// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a
-// temporary file and closing it atomically changes the temporary file to
-// destination path. Writing and closing concurrently is not allowed.
-func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
- f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
- if err != nil {
- return nil, err
- }
-
- abspath, err := filepath.Abs(filename)
- if err != nil {
- return nil, err
- }
- return &atomicFileWriter{
- f: f,
- fn: abspath,
- perm: perm,
- }, nil
-}
-
-// AtomicWriteFile atomically writes data to a file named by filename.
-func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
- f, err := NewAtomicFileWriter(filename, perm)
- if err != nil {
- return err
- }
- n, err := f.Write(data)
- if err == nil && n < len(data) {
- err = io.ErrShortWrite
- f.(*atomicFileWriter).writeErr = err
- }
- if err1 := f.Close(); err == nil {
- err = err1
- }
- return err
-}
-
-type atomicFileWriter struct {
- f *os.File
- fn string
- writeErr error
- perm os.FileMode
-}
-
-func (w *atomicFileWriter) Write(dt []byte) (int, error) {
- n, err := w.f.Write(dt)
- if err != nil {
- w.writeErr = err
- }
- return n, err
-}
-
-func (w *atomicFileWriter) Close() (retErr error) {
- defer func() {
- if retErr != nil || w.writeErr != nil {
- os.Remove(w.f.Name())
- }
- }()
- if err := w.f.Sync(); err != nil {
- w.f.Close()
- return err
- }
- if err := w.f.Close(); err != nil {
- return err
- }
- if err := os.Chmod(w.f.Name(), w.perm); err != nil {
- return err
- }
- if w.writeErr == nil {
- return os.Rename(w.f.Name(), w.fn)
- }
- return nil
-}
-
-// AtomicWriteSet is used to atomically write a set
-// of files and ensure they are visible at the same time.
-// Must be committed to a new directory.
-type AtomicWriteSet struct {
- root string
-}
-
-// NewAtomicWriteSet creates a new atomic write set to
-// atomically create a set of files. The given directory
-// is used as the base directory for storing files before
-// commit. If no temporary directory is given the system
-// default is used.
-func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
- td, err := ioutil.TempDir(tmpDir, "write-set-")
- if err != nil {
- return nil, err
- }
-
- return &AtomicWriteSet{
- root: td,
- }, nil
-}
-
-// WriteFile writes a file to the set, guaranteeing the file
-// has been synced.
-func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
- f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
- if err != nil {
- return err
- }
- n, err := f.Write(data)
- if err == nil && n < len(data) {
- err = io.ErrShortWrite
- }
- if err1 := f.Close(); err == nil {
- err = err1
- }
- return err
-}
-
-type syncFileCloser struct {
- *os.File
-}
-
-func (w syncFileCloser) Close() error {
- err := w.File.Sync()
- if err1 := w.File.Close(); err == nil {
- err = err1
- }
- return err
-}
-
-// FileWriter opens a file writer inside the set. The file
-// should be synced and closed before calling commit.
-func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
- f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
- if err != nil {
- return nil, err
- }
- return syncFileCloser{f}, nil
-}
-
-// Cancel cancels the set and removes all temporary data
-// created in the set.
-func (ws *AtomicWriteSet) Cancel() error {
- return os.RemoveAll(ws.root)
-}
-
-// Commit moves all created files to the target directory. The
-// target directory must not exist and the parent of the target
-// directory must exist.
-func (ws *AtomicWriteSet) Commit(target string) error {
- return os.Rename(ws.root, target)
-}
-
-// String returns the location the set is writing to.
-func (ws *AtomicWriteSet) String() string {
- return ws.root
-}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
deleted file mode 100644
index 63f3c07f4..000000000
--- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package ioutils
-
-import (
- "crypto/sha256"
- "encoding/hex"
- "io"
-
- "golang.org/x/net/context"
-)
-
-type readCloserWrapper struct {
- io.Reader
- closer func() error
-}
-
-func (r *readCloserWrapper) Close() error {
- return r.closer()
-}
-
-// NewReadCloserWrapper returns a new io.ReadCloser.
-func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
- return &readCloserWrapper{
- Reader: r,
- closer: closer,
- }
-}
-
-type readerErrWrapper struct {
- reader io.Reader
- closer func()
-}
-
-func (r *readerErrWrapper) Read(p []byte) (int, error) {
- n, err := r.reader.Read(p)
- if err != nil {
- r.closer()
- }
- return n, err
-}
-
-// NewReaderErrWrapper returns a new io.Reader.
-func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
- return &readerErrWrapper{
- reader: r,
- closer: closer,
- }
-}
-
-// HashData returns the sha256 sum of src.
-func HashData(src io.Reader) (string, error) {
- h := sha256.New()
- if _, err := io.Copy(h, src); err != nil {
- return "", err
- }
- return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
-}
-
-// OnEOFReader wraps an io.ReadCloser and a function
-// the function will run at the end of file or close the file.
-type OnEOFReader struct {
- Rc io.ReadCloser
- Fn func()
-}
-
-func (r *OnEOFReader) Read(p []byte) (n int, err error) {
- n, err = r.Rc.Read(p)
- if err == io.EOF {
- r.runFunc()
- }
- return
-}
-
-// Close closes the file and run the function.
-func (r *OnEOFReader) Close() error {
- err := r.Rc.Close()
- r.runFunc()
- return err
-}
-
-func (r *OnEOFReader) runFunc() {
- if fn := r.Fn; fn != nil {
- fn()
- r.Fn = nil
- }
-}
-
-// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
-// operations.
-type cancelReadCloser struct {
- cancel func()
- pR *io.PipeReader // Stream to read from
- pW *io.PipeWriter
-}
-
-// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
-// context is cancelled. The returned io.ReadCloser must be closed when it is
-// no longer needed.
-func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
- pR, pW := io.Pipe()
-
- // Create a context used to signal when the pipe is closed
- doneCtx, cancel := context.WithCancel(context.Background())
-
- p := &cancelReadCloser{
- cancel: cancel,
- pR: pR,
- pW: pW,
- }
-
- go func() {
- _, err := io.Copy(pW, in)
- select {
- case <-ctx.Done():
- // If the context was closed, p.closeWithError
- // was already called. Calling it again would
- // change the error that Read returns.
- default:
- p.closeWithError(err)
- }
- in.Close()
- }()
- go func() {
- for {
- select {
- case <-ctx.Done():
- p.closeWithError(ctx.Err())
- case <-doneCtx.Done():
- return
- }
- }
- }()
-
- return p
-}
-
-// Read wraps the Read method of the pipe that provides data from the wrapped
-// ReadCloser.
-func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
- return p.pR.Read(buf)
-}
-
-// closeWithError closes the wrapper and its underlying reader. It will
-// cause future calls to Read to return err.
-func (p *cancelReadCloser) closeWithError(err error) {
- p.pW.CloseWithError(err)
- p.cancel()
-}
-
-// Close closes the wrapper its underlying reader. It will cause
-// future calls to Read to return io.EOF.
-func (p *cancelReadCloser) Close() error {
- p.closeWithError(io.EOF)
- return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
deleted file mode 100644
index 1539ad21b..000000000
--- a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !windows
-
-package ioutils
-
-import "io/ioutil"
-
-// TempDir on Unix systems is equivalent to ioutil.TempDir.
-func TempDir(dir, prefix string) (string, error) {
- return ioutil.TempDir(dir, prefix)
-}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
deleted file mode 100644
index c258e5fdd..000000000
--- a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build windows
-
-package ioutils
-
-import (
- "io/ioutil"
-
- "github.com/docker/docker/pkg/longpath"
-)
-
-// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
-func TempDir(dir, prefix string) (string, error) {
- tempDir, err := ioutil.TempDir(dir, prefix)
- if err != nil {
- return "", err
- }
- return longpath.AddPrefix(tempDir), nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
deleted file mode 100644
index 52a4901ad..000000000
--- a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package ioutils
-
-import (
- "io"
- "sync"
-)
-
-// WriteFlusher wraps the Write and Flush operation ensuring that every write
-// is a flush. In addition, the Close method can be called to intercept
-// Read/Write calls if the targets lifecycle has already ended.
-type WriteFlusher struct {
- w io.Writer
- flusher flusher
- flushed chan struct{}
- flushedOnce sync.Once
- closed chan struct{}
- closeLock sync.Mutex
-}
-
-type flusher interface {
- Flush()
-}
-
-var errWriteFlusherClosed = io.EOF
-
-func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
- select {
- case <-wf.closed:
- return 0, errWriteFlusherClosed
- default:
- }
-
- n, err = wf.w.Write(b)
- wf.Flush() // every write is a flush.
- return n, err
-}
-
-// Flush the stream immediately.
-func (wf *WriteFlusher) Flush() {
- select {
- case <-wf.closed:
- return
- default:
- }
-
- wf.flushedOnce.Do(func() {
- close(wf.flushed)
- })
- wf.flusher.Flush()
-}
-
-// Flushed returns the state of flushed.
-// If it's flushed, return true, or else it return false.
-func (wf *WriteFlusher) Flushed() bool {
- // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
- // be used to detect whether or a response code has been issued or not.
- // Another hook should be used instead.
- var flushed bool
- select {
- case <-wf.flushed:
- flushed = true
- default:
- }
- return flushed
-}
-
-// Close closes the write flusher, disallowing any further writes to the
-// target. After the flusher is closed, all calls to write or flush will
-// result in an error.
-func (wf *WriteFlusher) Close() error {
- wf.closeLock.Lock()
- defer wf.closeLock.Unlock()
-
- select {
- case <-wf.closed:
- return errWriteFlusherClosed
- default:
- close(wf.closed)
- }
- return nil
-}
-
-// NewWriteFlusher returns a new WriteFlusher.
-func NewWriteFlusher(w io.Writer) *WriteFlusher {
- var fl flusher
- if f, ok := w.(flusher); ok {
- fl = f
- } else {
- fl = &NopFlusher{}
- }
- return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
-}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
deleted file mode 100644
index ccc7f9c23..000000000
--- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package ioutils
-
-import "io"
-
-// NopWriter represents a type which write operation is nop.
-type NopWriter struct{}
-
-func (*NopWriter) Write(buf []byte) (int, error) {
- return len(buf), nil
-}
-
-type nopWriteCloser struct {
- io.Writer
-}
-
-func (w *nopWriteCloser) Close() error { return nil }
-
-// NopWriteCloser returns a nopWriteCloser.
-func NopWriteCloser(w io.Writer) io.WriteCloser {
- return &nopWriteCloser{w}
-}
-
-// NopFlusher represents a type which flush operation is nop.
-type NopFlusher struct{}
-
-// Flush is a nop operation.
-func (f *NopFlusher) Flush() {}
-
-type writeCloserWrapper struct {
- io.Writer
- closer func() error
-}
-
-func (r *writeCloserWrapper) Close() error {
- return r.closer()
-}
-
-// NewWriteCloserWrapper returns a new io.WriteCloser.
-func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
- return &writeCloserWrapper{
- Writer: r,
- closer: closer,
- }
-}
-
-// WriteCounter wraps a concrete io.Writer and hold a count of the number
-// of bytes written to the writer during a "session".
-// This can be convenient when write return is masked
-// (e.g., json.Encoder.Encode())
-type WriteCounter struct {
- Count int64
- Writer io.Writer
-}
-
-// NewWriteCounter returns a new WriteCounter.
-func NewWriteCounter(w io.Writer) *WriteCounter {
- return &WriteCounter{
- Writer: w,
- }
-}
-
-func (wc *WriteCounter) Write(p []byte) (count int, err error) {
- count, err = wc.Writer.Write(p)
- wc.Count += int64(count)
- return
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_linux.go
deleted file mode 100644
index 461dc097c..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_linux.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package devices
-
-import (
- "errors"
- "io/ioutil"
- "os"
- "path/filepath"
-
- "github.com/opencontainers/runc/libcontainer/configs"
-
- "golang.org/x/sys/unix"
-)
-
-var (
- ErrNotADevice = errors.New("not a device node")
-)
-
-// Testing dependencies
-var (
- unixLstat = unix.Lstat
- ioutilReadDir = ioutil.ReadDir
-)
-
-// Given the path to a device and its cgroup_permissions(which cannot be easily queried) look up the information about a linux device and return that information as a Device struct.
-func DeviceFromPath(path, permissions string) (*configs.Device, error) {
- var stat unix.Stat_t
- err := unixLstat(path, &stat)
- if err != nil {
- return nil, err
- }
- var (
- devType rune
- mode = stat.Mode
- )
- switch {
- case mode&unix.S_IFBLK == unix.S_IFBLK:
- devType = 'b'
- case mode&unix.S_IFCHR == unix.S_IFCHR:
- devType = 'c'
- default:
- return nil, ErrNotADevice
- }
- devNumber := int(stat.Rdev)
- uid := stat.Uid
- gid := stat.Gid
- return &configs.Device{
- Type: devType,
- Path: path,
- Major: Major(devNumber),
- Minor: Minor(devNumber),
- Permissions: permissions,
- FileMode: os.FileMode(mode),
- Uid: uid,
- Gid: gid,
- }, nil
-}
-
-func HostDevices() ([]*configs.Device, error) {
- return getDevices("/dev")
-}
-
-func getDevices(path string) ([]*configs.Device, error) {
- files, err := ioutilReadDir(path)
- if err != nil {
- return nil, err
- }
- out := []*configs.Device{}
- for _, f := range files {
- switch {
- case f.IsDir():
- switch f.Name() {
- // ".lxc" & ".lxd-mounts" added to address https://github.com/lxc/lxd/issues/2825
- case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts":
- continue
- default:
- sub, err := getDevices(filepath.Join(path, f.Name()))
- if err != nil {
- return nil, err
- }
-
- out = append(out, sub...)
- continue
- }
- case f.Name() == "console":
- continue
- }
- device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm")
- if err != nil {
- if err == ErrNotADevice {
- continue
- }
- if os.IsNotExist(err) {
- continue
- }
- return nil, err
- }
- out = append(out, device)
- }
- return out, nil
-}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
deleted file mode 100644
index 6649b9f2d..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// +build !linux
-
-package devices
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go
deleted file mode 100644
index 885b6e5dd..000000000
--- a/vendor/github.com/opencontainers/runc/libcontainer/devices/number.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build linux freebsd
-
-package devices
-
-/*
-
-This code provides support for manipulating linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved.
-
-You can read what they are here:
-
- - http://www.makelinux.net/ldd3/chp-3-sect-2
- - http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
-
-Note! These are NOT the same as the MAJOR(dev_t device);, MINOR(dev_t device); and MKDEV(int major, int minor); functions as defined in <linux/kdev_t.h> as the representation of device numbers used by go is different than the one used internally to the kernel! - https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9
-
-*/
-
-func Major(devNumber int) int64 {
- return int64((devNumber >> 8) & 0xfff)
-}
-
-func Minor(devNumber int) int64 {
- return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
-}
diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/abs.go b/vendor/github.com/opencontainers/runtime-tools/filepath/abs.go
new file mode 100644
index 000000000..c19bba26a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/filepath/abs.go
@@ -0,0 +1,52 @@
+package filepath
+
+import (
+ "errors"
+ "regexp"
+ "strings"
+)
+
+var windowsAbs = regexp.MustCompile(`^[a-zA-Z]:\\.*$`)
+
+// Abs is a version of path/filepath's Abs with an explicit operating
+// system and current working directory.
+func Abs(os, path, cwd string) (_ string, err error) {
+ if os == "windows" {
+ return "", errors.New("Abs() does not support windows yet")
+ }
+ if IsAbs(os, path) {
+ return Clean(os, path), nil
+ }
+ return Clean(os, Join(os, cwd, path)), nil
+}
+
+// IsAbs is a version of path/filepath's IsAbs with an explicit
+// operating system.
+func IsAbs(os, path string) bool {
+ if os == "windows" {
+ // FIXME: copy hideous logic from Go's
+ // src/path/filepath/path_windows.go into somewhere where we can
+ // put 3-clause BSD licensed code.
+ return windowsAbs.MatchString(path)
+ }
+ sep := Separator(os)
+
+ // POSIX has [1]:
+ //
+ // > If a pathname begins with two successive <slash> characters,
+ // > the first component following the leading <slash> characters
+ // > may be interpreted in an implementation-defined manner,
+ // > although more than two leading <slash> characters shall be
+ // > treated as a single <slash> character.
+ //
+ // And Boost treats // as non-absolute [2], but Linux [3,4], Python
+ // [5] and Go [6] all treat // as absolute.
+ //
+ // [1]: http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13
+ // [2]: https://github.com/boostorg/filesystem/blob/boost-1.64.0/test/path_test.cpp#L861
+ // [3]: http://man7.org/linux/man-pages/man7/path_resolution.7.html
+ // [4]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/filesystems/path-lookup.md?h=v4.12#n41
+ // [5]: https://github.com/python/cpython/blob/v3.6.1/Lib/posixpath.py#L64-L66
+ // [6]: https://go.googlesource.com/go/+/go1.8.3/src/path/path.go#199
+ return strings.HasPrefix(path, string(sep))
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/ancestor.go b/vendor/github.com/opencontainers/runtime-tools/filepath/ancestor.go
new file mode 100644
index 000000000..896cd8206
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/filepath/ancestor.go
@@ -0,0 +1,32 @@
+package filepath
+
+import (
+ "fmt"
+ "strings"
+)
+
+// IsAncestor returns true when pathA is a strict ancestor of pathB,
+// and false when the paths are equal or pathB is outside of pathA.
+// Paths that are not absolute will be made absolute with Abs.
+func IsAncestor(os, pathA, pathB, cwd string) (_ bool, err error) {
+ if pathA == pathB {
+ return false, nil
+ }
+
+ pathA, err = Abs(os, pathA, cwd)
+ if err != nil {
+ return false, err
+ }
+ pathB, err = Abs(os, pathB, cwd)
+ if err != nil {
+ return false, err
+ }
+ sep := Separator(os)
+ if !strings.HasSuffix(pathA, string(sep)) {
+ pathA = fmt.Sprintf("%s%c", pathA, sep)
+ }
+ if pathA == pathB {
+ return false, nil
+ }
+ return strings.HasPrefix(pathB, pathA), nil
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/clean.go b/vendor/github.com/opencontainers/runtime-tools/filepath/clean.go
new file mode 100644
index 000000000..b70c575f2
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/filepath/clean.go
@@ -0,0 +1,56 @@
+package filepath
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Clean is an explicit-OS version of path/filepath's Clean.
+func Clean(os, path string) string {
+ abs := IsAbs(os, path)
+ sep := Separator(os)
+ elements := strings.Split(path, string(sep))
+
+ // Replace multiple Separator elements with a single one.
+ for i := 0; i < len(elements); i++ {
+ if len(elements[i]) == 0 {
+ elements = append(elements[:i], elements[i+1:]...)
+ i--
+ }
+ }
+
+ // Eliminate each . path name element (the current directory).
+ for i := 0; i < len(elements); i++ {
+ if elements[i] == "." && len(elements) > 1 {
+ elements = append(elements[:i], elements[i+1:]...)
+ i--
+ }
+ }
+
+ // Eliminate each inner .. path name element (the parent directory)
+ // along with the non-.. element that precedes it.
+ for i := 1; i < len(elements); i++ {
+ if i > 0 && elements[i] == ".." {
+ elements = append(elements[:i-1], elements[i+1:]...)
+ i -= 2
+ }
+ }
+
+ // Eliminate .. elements that begin a rooted path:
+ // that is, replace "/.." by "/" at the beginning of a path,
+ // assuming Separator is '/'.
+ if abs && len(elements) > 0 {
+ for elements[0] == ".." {
+ elements = elements[1:]
+ }
+ }
+
+ cleaned := strings.Join(elements, string(sep))
+ if abs {
+ cleaned = fmt.Sprintf("%c%s", sep, cleaned)
+ }
+ if cleaned == path {
+ return path
+ }
+ return Clean(os, cleaned)
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/doc.go b/vendor/github.com/opencontainers/runtime-tools/filepath/doc.go
new file mode 100644
index 000000000..7ee085bf4
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/filepath/doc.go
@@ -0,0 +1,6 @@
+// Package filepath implements Go's filepath package with explicit
+// operating systems (and, for some functions, an explicit working
+// directory). This allows tools built for one OS to operate on paths
+// targeting another OS. For example, a Linux build can determine
+// whether a path is absolute on Linux or on Windows.
+package filepath
diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/join.go b/vendor/github.com/opencontainers/runtime-tools/filepath/join.go
new file mode 100644
index 000000000..b865d237c
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/filepath/join.go
@@ -0,0 +1,9 @@
+package filepath
+
+import "strings"
+
+// Join is an explicit-OS version of path/filepath's Join.
+func Join(os string, elem ...string) string {
+ sep := Separator(os)
+ return Clean(os, strings.Join(elem, string(sep)))
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/separator.go b/vendor/github.com/opencontainers/runtime-tools/filepath/separator.go
new file mode 100644
index 000000000..2c5e8905a
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/filepath/separator.go
@@ -0,0 +1,9 @@
+package filepath
+
+// Separator is an explicit-OS version of path/filepath's Separator.
+func Separator(os string) rune {
+ if os == "windows" {
+ return '\\'
+ }
+ return '/'
+}
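A short demonstration sketch of the new explicit-OS helpers added by the files above; expected results, derived from the code as written, are noted in comments:

package main

import (
	"fmt"

	"github.com/opencontainers/runtime-tools/filepath"
)

func main() {
	fmt.Println(filepath.IsAbs("windows", `c:\Users`))     // true
	fmt.Println(filepath.IsAbs("linux", "relative/path")) // false
	fmt.Println(filepath.Clean("linux", "/a/b/../c//./d")) // /a/c/d
	fmt.Println(filepath.Join("windows", `c:\a`, "b", "c")) // c:\a\b\c

	// /a is a strict ancestor of /a/b, so this reports true.
	ok, err := filepath.IsAncestor("linux", "/a", "/a/b", "/")
	if err != nil {
		panic(err)
	}
	fmt.Println(ok) // true
}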
diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go
index fce88f5e2..5a1f5543e 100644
--- a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go
+++ b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go
@@ -17,6 +17,12 @@ import (
var (
// Namespaces include the names of supported namespaces.
Namespaces = []string{"network", "pid", "mount", "ipc", "uts", "user", "cgroup"}
+
+ // we don't care about order...and this is way faster...
+ removeFunc = func(s []string, i int) []string {
+ s[i] = s[len(s)-1]
+ return s[:len(s)-1]
+ }
)
// Generator represents a generator for a container spec.
@@ -35,7 +41,7 @@ func New() Generator {
spec := rspec.Spec{
Version: rspec.Version,
Root: &rspec.Root{
- Path: "",
+ Path: "rootfs",
Readonly: false,
},
Process: &rspec.Process{
@@ -392,7 +398,7 @@ func (g *Generator) SetProcessArgs(args []string) {
// ClearProcessEnv clears g.spec.Process.Env.
func (g *Generator) ClearProcessEnv() {
- if g.spec == nil {
+ if g.spec == nil || g.spec.Process == nil {
return
}
g.spec.Process.Env = []string{}
@@ -434,7 +440,7 @@ func (g *Generator) AddProcessRlimits(rType string, rHard uint64, rSoft uint64)
// RemoveProcessRlimits removes a rlimit from g.spec.Process.Rlimits.
func (g *Generator) RemoveProcessRlimits(rType string) error {
- if g.spec == nil {
+ if g.spec == nil || g.spec.Process == nil {
return nil
}
for i, rlimit := range g.spec.Process.Rlimits {
@@ -448,7 +454,7 @@ func (g *Generator) RemoveProcessRlimits(rType string) error {
// ClearProcessRlimits clear g.spec.Process.Rlimits.
func (g *Generator) ClearProcessRlimits() {
- if g.spec == nil {
+ if g.spec == nil || g.spec.Process == nil {
return
}
g.spec.Process.Rlimits = []rspec.POSIXRlimit{}
@@ -456,7 +462,7 @@ func (g *Generator) ClearProcessRlimits() {
// ClearProcessAdditionalGids clear g.spec.Process.AdditionalGids.
func (g *Generator) ClearProcessAdditionalGids() {
- if g.spec == nil {
+ if g.spec == nil || g.spec.Process == nil {
return
}
g.spec.Process.User.AdditionalGids = []uint32{}
@@ -716,13 +722,11 @@ func (g *Generator) SetLinuxRootPropagation(rp string) error {
switch rp {
case "":
case "private":
- case "rprivate":
case "slave":
- case "rslave":
case "shared":
- case "rshared":
+ case "unbindable":
default:
- return fmt.Errorf("rootfs-propagation must be empty or one of private|rprivate|slave|rslave|shared|rshared")
+ return fmt.Errorf("rootfs-propagation must be empty or one of private|slave|shared|unbindable")
}
g.initSpecLinux()
g.spec.Linux.RootfsPropagation = rp
@@ -731,10 +735,7 @@ func (g *Generator) SetLinuxRootPropagation(rp string) error {
// ClearPreStartHooks clear g.spec.Hooks.Prestart.
func (g *Generator) ClearPreStartHooks() {
- if g.spec == nil {
- return
- }
- if g.spec.Hooks == nil {
+ if g.spec == nil || g.spec.Hooks == nil {
return
}
g.spec.Hooks.Prestart = []rspec.Hook{}
@@ -781,10 +782,7 @@ func (g *Generator) AddPreStartHookTimeout(path string, timeout int) {
// ClearPostStopHooks clear g.spec.Hooks.Poststop.
func (g *Generator) ClearPostStopHooks() {
- if g.spec == nil {
- return
- }
- if g.spec.Hooks == nil {
+ if g.spec == nil || g.spec.Hooks == nil {
return
}
g.spec.Hooks.Poststop = []rspec.Hook{}
@@ -831,10 +829,7 @@ func (g *Generator) AddPostStopHookTimeout(path string, timeout int) {
// ClearPostStartHooks clear g.spec.Hooks.Poststart.
func (g *Generator) ClearPostStartHooks() {
- if g.spec == nil {
- return
- }
- if g.spec.Hooks == nil {
+ if g.spec == nil || g.spec.Hooks == nil {
return
}
g.spec.Hooks.Poststart = []rspec.Hook{}
@@ -970,7 +965,7 @@ func (g *Generator) SetupPrivileged(privileged bool) {
// ClearProcessCapabilities clear g.spec.Process.Capabilities.
func (g *Generator) ClearProcessCapabilities() {
- if g.spec == nil {
+ if g.spec == nil || g.spec.Process == nil || g.spec.Process.Capabilities == nil {
return
}
g.spec.Process.Capabilities.Bounding = []string{}
@@ -980,8 +975,32 @@ func (g *Generator) ClearProcessCapabilities() {
g.spec.Process.Capabilities.Ambient = []string{}
}
-// AddProcessCapability adds a process capability into g.spec.Process.Capabilities.
-func (g *Generator) AddProcessCapability(c string) error {
+// AddProcessCapabilityAmbient adds a process capability into g.spec.Process.Capabilities.Ambient.
+func (g *Generator) AddProcessCapabilityAmbient(c string) error {
+ cp := strings.ToUpper(c)
+ if err := validate.CapValid(cp, g.HostSpecific); err != nil {
+ return err
+ }
+
+ g.initSpecProcessCapabilities()
+
+ var foundAmbient bool
+ for _, cap := range g.spec.Process.Capabilities.Ambient {
+ if strings.ToUpper(cap) == cp {
+ foundAmbient = true
+ break
+ }
+ }
+
+ if !foundAmbient {
+ g.spec.Process.Capabilities.Ambient = append(g.spec.Process.Capabilities.Ambient, cp)
+ }
+
+ return nil
+}
+
+// AddProcessCapabilityBounding adds a process capability into g.spec.Process.Capabilities.Bounding.
+func (g *Generator) AddProcessCapabilityBounding(c string) error {
cp := strings.ToUpper(c)
if err := validate.CapValid(cp, g.HostSpecific); err != nil {
return err
@@ -1000,6 +1019,18 @@ func (g *Generator) AddProcessCapability(c string) error {
g.spec.Process.Capabilities.Bounding = append(g.spec.Process.Capabilities.Bounding, cp)
}
+ return nil
+}
+
+// AddProcessCapabilityEffective adds a process capability into g.spec.Process.Capabilities.Effective.
+func (g *Generator) AddProcessCapabilityEffective(c string) error {
+ cp := strings.ToUpper(c)
+ if err := validate.CapValid(cp, g.HostSpecific); err != nil {
+ return err
+ }
+
+ g.initSpecProcessCapabilities()
+
var foundEffective bool
for _, cap := range g.spec.Process.Capabilities.Effective {
if strings.ToUpper(cap) == cp {
@@ -1011,6 +1042,18 @@ func (g *Generator) AddProcessCapability(c string) error {
g.spec.Process.Capabilities.Effective = append(g.spec.Process.Capabilities.Effective, cp)
}
+ return nil
+}
+
+// AddProcessCapabilityInheritable adds a process capability into g.spec.Process.Capabilities.Inheritable.
+func (g *Generator) AddProcessCapabilityInheritable(c string) error {
+ cp := strings.ToUpper(c)
+ if err := validate.CapValid(cp, g.HostSpecific); err != nil {
+ return err
+ }
+
+ g.initSpecProcessCapabilities()
+
var foundInheritable bool
for _, cap := range g.spec.Process.Capabilities.Inheritable {
if strings.ToUpper(cap) == cp {
@@ -1022,6 +1065,18 @@ func (g *Generator) AddProcessCapability(c string) error {
g.spec.Process.Capabilities.Inheritable = append(g.spec.Process.Capabilities.Inheritable, cp)
}
+ return nil
+}
+
+// AddProcessCapabilityPermitted adds a process capability into g.spec.Process.Capabilities.Permitted.
+func (g *Generator) AddProcessCapabilityPermitted(c string) error {
+ cp := strings.ToUpper(c)
+ if err := validate.CapValid(cp, g.HostSpecific); err != nil {
+ return err
+ }
+
+ g.initSpecProcessCapabilities()
+
var foundPermitted bool
for _, cap := range g.spec.Process.Capabilities.Permitted {
if strings.ToUpper(cap) == cp {
@@ -1033,66 +1088,85 @@ func (g *Generator) AddProcessCapability(c string) error {
g.spec.Process.Capabilities.Permitted = append(g.spec.Process.Capabilities.Permitted, cp)
}
- var foundAmbient bool
- for _, cap := range g.spec.Process.Capabilities.Ambient {
+ return nil
+}
+
+// DropProcessCapabilityAmbient drops a process capability from g.spec.Process.Capabilities.Ambient.
+func (g *Generator) DropProcessCapabilityAmbient(c string) error {
+ cp := strings.ToUpper(c)
+
+ g.initSpecProcessCapabilities()
+
+ for i, cap := range g.spec.Process.Capabilities.Ambient {
if strings.ToUpper(cap) == cp {
- foundAmbient = true
- break
+ g.spec.Process.Capabilities.Ambient = removeFunc(g.spec.Process.Capabilities.Ambient, i)
}
}
- if !foundAmbient {
- g.spec.Process.Capabilities.Ambient = append(g.spec.Process.Capabilities.Ambient, cp)
- }
- return nil
+ return validate.CapValid(cp, false)
}
-// DropProcessCapability drops a process capability from g.spec.Process.Capabilities.
-func (g *Generator) DropProcessCapability(c string) error {
+// DropProcessCapabilityBounding drops a process capability from g.spec.Process.Capabilities.Bounding.
+func (g *Generator) DropProcessCapabilityBounding(c string) error {
cp := strings.ToUpper(c)
- if err := validate.CapValid(cp, g.HostSpecific); err != nil {
- return err
- }
g.initSpecProcessCapabilities()
- // we don't care about order...and this is way faster...
- removeFunc := func(s []string, i int) []string {
- s[i] = s[len(s)-1]
- return s[:len(s)-1]
- }
-
for i, cap := range g.spec.Process.Capabilities.Bounding {
if strings.ToUpper(cap) == cp {
g.spec.Process.Capabilities.Bounding = removeFunc(g.spec.Process.Capabilities.Bounding, i)
}
}
+ return validate.CapValid(cp, false)
+}
+
+// DropProcessCapabilityEffective drops a process capability from g.spec.Process.Capabilities.Effective.
+func (g *Generator) DropProcessCapabilityEffective(c string) error {
+ cp := strings.ToUpper(c)
+
+ g.initSpecProcessCapabilities()
+
for i, cap := range g.spec.Process.Capabilities.Effective {
if strings.ToUpper(cap) == cp {
g.spec.Process.Capabilities.Effective = removeFunc(g.spec.Process.Capabilities.Effective, i)
}
}
+ return validate.CapValid(cp, false)
+}
+
+// DropProcessCapabilityInheritable drops a process capability from g.spec.Process.Capabilities.Inheritable.
+func (g *Generator) DropProcessCapabilityInheritable(c string) error {
+ cp := strings.ToUpper(c)
+
+ g.initSpecProcessCapabilities()
+
for i, cap := range g.spec.Process.Capabilities.Inheritable {
if strings.ToUpper(cap) == cp {
g.spec.Process.Capabilities.Inheritable = removeFunc(g.spec.Process.Capabilities.Inheritable, i)
}
}
- for i, cap := range g.spec.Process.Capabilities.Permitted {
- if strings.ToUpper(cap) == cp {
- g.spec.Process.Capabilities.Permitted = removeFunc(g.spec.Process.Capabilities.Permitted, i)
- }
- }
+ return validate.CapValid(cp, false)
+}
- for i, cap := range g.spec.Process.Capabilities.Ambient {
+// DropProcessCapabilityPermitted drops a process capability from g.spec.Process.Capabilities.Permitted.
+func (g *Generator) DropProcessCapabilityPermitted(c string) error {
+ cp := strings.ToUpper(c)
+
+ g.initSpecProcessCapabilities()
+
+ for i, cap := range g.spec.Process.Capabilities.Permitted {
if strings.ToUpper(cap) == cp {
-			g.spec.Process.Capabilities.Ambient = removeFunc(g.spec.Process.Capabilities.Ambient, i)
+			g.spec.Process.Capabilities.Permitted = removeFunc(g.spec.Process.Capabilities.Permitted, i)
}
}
- return nil
+ return validate.CapValid(cp, false)
}
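
Splitting the old all-sets AddProcessCapability/DropProcessCapability pair into per-set variants lets callers manage the five capability sets independently. A minimal sketch of the resulting call pattern, assuming the zero-argument generate.New of this vendored revision (which returns a Generator directly); the capability names are only examples:

package main

import (
	"log"

	"github.com/opencontainers/runtime-tools/generate"
)

func main() {
	g := generate.New()

	// Grant CAP_NET_BIND_SERVICE in the bounding and effective sets only.
	for _, add := range []func(string) error{
		g.AddProcessCapabilityBounding,
		g.AddProcessCapabilityEffective,
	} {
		if err := add("CAP_NET_BIND_SERVICE"); err != nil {
			log.Fatal(err)
		}
	}

	// Drop CAP_SYS_ADMIN from the ambient set; the other sets are untouched.
	if err := g.DropProcessCapabilityAmbient("CAP_SYS_ADMIN"); err != nil {
		log.Fatal(err)
	}
}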
func mapStrToNamespace(ns string, path string) (rspec.LinuxNamespace, error) {
@@ -1203,6 +1277,39 @@ func (g *Generator) ClearLinuxDevices() {
g.spec.Linux.Devices = []rspec.LinuxDevice{}
}
+// AddLinuxResourcesDevice - add a device into g.spec.Linux.Resources.Devices
+func (g *Generator) AddLinuxResourcesDevice(allow bool, devType string, major, minor *int64, access string) {
+ g.initSpecLinuxResources()
+
+ device := rspec.LinuxDeviceCgroup{
+ Allow: allow,
+ Type: devType,
+ Access: access,
+ Major: major,
+ Minor: minor,
+ }
+ g.spec.Linux.Resources.Devices = append(g.spec.Linux.Resources.Devices, device)
+}
+
+// RemoveLinuxResourcesDevice - remove a device from g.spec.Linux.Resources.Devices
+func (g *Generator) RemoveLinuxResourcesDevice(allow bool, devType string, major, minor *int64, access string) {
+ if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Resources == nil {
+ return
+ }
+ for i, device := range g.spec.Linux.Resources.Devices {
+ if device.Allow == allow &&
+			devType == device.Type &&
+			access == device.Access &&
+ (major == device.Major || (major != nil && device.Major != nil && *major == *device.Major)) &&
+ (minor == device.Minor || (minor != nil && device.Minor != nil && *minor == *device.Minor)) {
+
+ g.spec.Linux.Resources.Devices = append(g.spec.Linux.Resources.Devices[:i], g.spec.Linux.Resources.Devices[i+1:]...)
+ return
+ }
+ }
+}
+
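The new resource-device helpers take the allow flag, cgroup device type, optional major/minor numbers, and access string directly. A short sketch (the /dev/null numbers are illustrative, and allowDevNull is a hypothetical wrapper around a *generate.Generator as in the sketch above):

// allowDevNull whitelists the character device 1:3 (/dev/null) for
// read/write access in the container's device cgroup.
func allowDevNull(g *generate.Generator) {
	major, minor := int64(1), int64(3)
	g.AddLinuxResourcesDevice(true, "c", &major, &minor, "rw")
	// RemoveLinuxResourcesDevice deletes the same entry again; every
	// field must match the rule that was added.
}
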
// strPtr returns the pointer pointing to the string s.
func strPtr(s string) *string { return &s }
diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/bundle.go b/vendor/github.com/opencontainers/runtime-tools/specerror/bundle.go
new file mode 100644
index 000000000..0a6b2d423
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/specerror/bundle.go
@@ -0,0 +1,29 @@
+package specerror
+
+import (
+ "fmt"
+
+ rfc2119 "github.com/opencontainers/runtime-tools/error"
+)
+
+// define error codes
+const (
+ // ConfigInRootBundleDir represents "This REQUIRED file MUST reside in the root of the bundle directory"
+ ConfigInRootBundleDir = "This REQUIRED file MUST reside in the root of the bundle directory."
+ // ConfigConstName represents "This REQUIRED file MUST be named `config.json`."
+ ConfigConstName = "This REQUIRED file MUST be named `config.json`."
+ // ArtifactsInSingleDir represents "When supplied, while these artifacts MUST all be present in a single directory on the local filesystem, that directory itself is not part of the bundle."
+ ArtifactsInSingleDir = "When supplied, while these artifacts MUST all be present in a single directory on the local filesystem, that directory itself is not part of the bundle."
+)
+
+var (
+ containerFormatRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "bundle.md#container-format"), nil
+ }
+)
+
+func init() {
+ register(ConfigInRootBundleDir, rfc2119.Must, containerFormatRef)
+ register(ConfigConstName, rfc2119.Must, containerFormatRef)
+ register(ArtifactsInSingleDir, rfc2119.Must, containerFormatRef)
+}
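
Every file in the new specerror package follows the same pattern: the constant's value is the spec sentence itself (doubling as the Code), and an init hook registers it with an RFC 2119 level plus a function that renders a version-pinned reference URL. A hypothetical sketch of wiring up one more requirement (the code name, quotation, and anchor below are invented for illustration):

package specerror

import (
	"fmt"

	rfc2119 "github.com/opencontainers/runtime-tools/error"
)

// ExampleRequirement is a hypothetical spec quotation used as its own code.
const ExampleRequirement = "The runtime MUST frobnicate the widget."

var exampleRef = func(version string) (reference string, err error) {
	// referenceTemplate pins the URL to the spec version under test.
	return fmt.Sprintf(referenceTemplate, version, "example.md#frobnication"), nil
}

func init() {
	register(ExampleRequirement, rfc2119.Must, exampleRef)
}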
diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/config-linux.go b/vendor/github.com/opencontainers/runtime-tools/specerror/config-linux.go
new file mode 100644
index 000000000..2967adcef
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/specerror/config-linux.go
@@ -0,0 +1,134 @@
+package specerror
+
+import (
+ "fmt"
+
+ rfc2119 "github.com/opencontainers/runtime-tools/error"
+)
+
+// define error codes
+const (
+ // DefaultFilesystems represents "The following filesystems SHOULD be made available in each container's filesystem:"
+ DefaultFilesystems = "The following filesystems SHOULD be made available in each container's filesystem:"
+ // NSPathAbs represents "This value MUST be an absolute path in the runtime mount namespace."
+ NSPathAbs = "This value MUST be an absolute path in the runtime mount namespace."
+ // NSProcInPath represents "The runtime MUST place the container process in the namespace associated with that `path`."
+ NSProcInPath = "The runtime MUST place the container process in the namespace associated with that `path`."
+ // NSPathMatchTypeError represents "The runtime MUST generate an error if `path` is not associated with a namespace of type `type`."
+ NSPathMatchTypeError = "The runtime MUST generate an error if `path` is not associated with a namespace of type `type`."
+ // NSNewNSWithoutPath represents "If `path` is not specified, the runtime MUST create a new container namespace of type `type`."
+ NSNewNSWithoutPath = "If `path` is not specified, the runtime MUST create a new container namespace of type `type`."
+ // NSInheritWithoutType represents "If a namespace type is not specified in the `namespaces` array, the container MUST inherit the runtime namespace of that type."
+ NSInheritWithoutType = "If a namespace type is not specified in the `namespaces` array, the container MUST inherit the runtime namespace of that type."
+ // NSErrorOnDup represents "If a `namespaces` field contains duplicated namespaces with same `type`, the runtime MUST generate an error."
+ NSErrorOnDup = "If a `namespaces` field contains duplicated namespaces with same `type`, the runtime MUST generate an error."
+ // UserNSMapOwnershipRO represents "The runtime SHOULD NOT modify the ownership of referenced filesystems to realize the mapping."
+ UserNSMapOwnershipRO = "The runtime SHOULD NOT modify the ownership of referenced filesystems to realize the mapping."
+ // DevicesAvailable represents "devices (array of objects, OPTIONAL) lists devices that MUST be available in the container."
+ DevicesAvailable = "devices (array of objects, OPTIONAL) lists devices that MUST be available in the container."
+ // DevicesFileNotMatch represents "If a file already exists at `path` that does not match the requested device, the runtime MUST generate an error."
+ DevicesFileNotMatch = "If a file already exists at `path` that does not match the requested device, the runtime MUST generate an error."
+ // DevicesMajMinRequired represents "`major, minor` (int64, REQUIRED unless `type` is `p`) - major, minor numbers for the device."
+ DevicesMajMinRequired = "`major, minor` (int64, REQUIRED unless `type` is `p`) - major, minor numbers for the device."
+ // DevicesErrorOnDup represents "The same `type`, `major` and `minor` SHOULD NOT be used for multiple devices."
+ DevicesErrorOnDup = "The same `type`, `major` and `minor` SHOULD NOT be used for multiple devices."
+ // DefaultDevices represents "In addition to any devices configured with this setting, the runtime MUST also supply default devices."
+ DefaultDevices = "In addition to any devices configured with this setting, the runtime MUST also supply default devices."
+ // CgroupsPathAbsOrRel represents "The value of `cgroupsPath` MUST be either an absolute path or a relative path."
+ CgroupsPathAbsOrRel = "The value of `cgroupsPath` MUST be either an absolute path or a relative path."
+ // CgroupsAbsPathRelToMount represents "In the case of an absolute path (starting with `/`), the runtime MUST take the path to be relative to the cgroups mount point."
+ CgroupsAbsPathRelToMount = "In the case of an absolute path (starting with `/`), the runtime MUST take the path to be relative to the cgroups mount point."
+ // CgroupsPathAttach represents "If the value is specified, the runtime MUST consistently attach to the same place in the cgroups hierarchy given the same value of `cgroupsPath`."
+ CgroupsPathAttach = "If the value is specified, the runtime MUST consistently attach to the same place in the cgroups hierarchy given the same value of `cgroupsPath`."
+ // CgroupsPathError represents "Runtimes MAY consider certain `cgroupsPath` values to be invalid, and MUST generate an error if this is the case."
+ CgroupsPathError = "Runtimes MAY consider certain `cgroupsPath` values to be invalid, and MUST generate an error if this is the case."
+ // DevicesApplyInOrder represents "The runtime MUST apply entries in the listed order."
+ DevicesApplyInOrder = "The runtime MUST apply entries in the listed order."
+ // BlkIOWeightOrLeafWeightExist represents "You MUST specify at least one of `weight` or `leafWeight` in a given entry, and MAY specify both."
+ BlkIOWeightOrLeafWeightExist = "You MUST specify at least one of `weight` or `leafWeight` in a given entry, and MAY specify both."
+	// IntelRdtPIDWrite represents "If `intelRdt` is set, the runtime MUST write the container process ID to the `<container-id>/tasks` file in a mounted `resctrl` pseudo-filesystem, using the container ID from `start` and creating the `<container-id>` directory if necessary."
+ IntelRdtPIDWrite = "If `intelRdt` is set, the runtime MUST write the container process ID to the `<container-id>/tasks` file in a mounted `resctrl` pseudo-filesystem, using the container ID from `start` and creating the `<container-id>` directory if necessary."
+ // IntelRdtNoMountedResctrlError represents "If no mounted `resctrl` pseudo-filesystem is available in the runtime mount namespace, the runtime MUST generate an error."
+ IntelRdtNoMountedResctrlError = "If no mounted `resctrl` pseudo-filesystem is available in the runtime mount namespace, the runtime MUST generate an error."
+ // NotManipResctrlWithoutIntelRdt represents "If `intelRdt` is not set, the runtime MUST NOT manipulate any `resctrl` pseudo-filesystems."
+ NotManipResctrlWithoutIntelRdt = "If `intelRdt` is not set, the runtime MUST NOT manipulate any `resctrl` pseudo-filesystems."
+ // IntelRdtL3CacheSchemaWrite represents "If `l3CacheSchema` is set, runtimes MUST write the value to the `schemata` file in the `<container-id>` directory discussed in `intelRdt`."
+ IntelRdtL3CacheSchemaWrite = "If `l3CacheSchema` is set, runtimes MUST write the value to the `schemata` file in the `<container-id>` directory discussed in `intelRdt`."
+ // IntelRdtL3CacheSchemaNotWrite represents "If `l3CacheSchema` is not set, runtimes MUST NOT write to `schemata` files in any `resctrl` pseudo-filesystems."
+ IntelRdtL3CacheSchemaNotWrite = "If `l3CacheSchema` is not set, runtimes MUST NOT write to `schemata` files in any `resctrl` pseudo-filesystems."
+ // SeccSyscallsNamesRequired represents "`names` MUST contain at least one entry."
+ SeccSyscallsNamesRequired = "`names` MUST contain at least one entry."
+ // MaskedPathsAbs represents "maskedPaths (array of strings, OPTIONAL) will mask over the provided paths inside the container so that they cannot be read. The values MUST be absolute paths in the container namespace."
+ MaskedPathsAbs = "maskedPaths (array of strings, OPTIONAL) will mask over the provided paths inside the container so that they cannot be read. The values MUST be absolute paths in the container namespace."
+ // ReadonlyPathsAbs represents "readonlyPaths (array of strings, OPTIONAL) will set the provided paths as readonly inside the container. The values MUST be absolute paths in the container namespace."
+ ReadonlyPathsAbs = "readonlyPaths (array of strings, OPTIONAL) will set the provided paths as readonly inside the container. The values MUST be absolute paths in the container namespace."
+)
+
+var (
+ defaultFilesystemsRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#default-filesystems"), nil
+ }
+ namespacesRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#namespaces"), nil
+ }
+ userNamespaceMappingsRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#user-namespace-mappings"), nil
+ }
+ devicesRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#devices"), nil
+ }
+ defaultDevicesRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#default-devices"), nil
+ }
+ cgroupsPathRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#cgroups-path"), nil
+ }
+ deviceWhitelistRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#device-whitelist"), nil
+ }
+ blockIoRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#block-io"), nil
+ }
+ intelrdtRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#intelrdt"), nil
+ }
+ seccompRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#seccomp"), nil
+ }
+ maskedPathsRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#masked-paths"), nil
+ }
+ readonlyPathsRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-linux.md#readonly-paths"), nil
+ }
+)
+
+func init() {
+ register(DefaultFilesystems, rfc2119.Should, defaultFilesystemsRef)
+ register(NSPathAbs, rfc2119.Must, namespacesRef)
+ register(NSProcInPath, rfc2119.Must, namespacesRef)
+ register(NSPathMatchTypeError, rfc2119.Must, namespacesRef)
+ register(NSNewNSWithoutPath, rfc2119.Must, namespacesRef)
+ register(NSInheritWithoutType, rfc2119.Must, namespacesRef)
+ register(NSErrorOnDup, rfc2119.Must, namespacesRef)
+ register(UserNSMapOwnershipRO, rfc2119.Should, userNamespaceMappingsRef)
+ register(DevicesAvailable, rfc2119.Must, devicesRef)
+ register(DevicesFileNotMatch, rfc2119.Must, devicesRef)
+ register(DevicesMajMinRequired, rfc2119.Required, devicesRef)
+ register(DevicesErrorOnDup, rfc2119.Should, devicesRef)
+ register(DefaultDevices, rfc2119.Must, defaultDevicesRef)
+ register(CgroupsPathAbsOrRel, rfc2119.Must, cgroupsPathRef)
+ register(CgroupsAbsPathRelToMount, rfc2119.Must, cgroupsPathRef)
+ register(CgroupsPathAttach, rfc2119.Must, cgroupsPathRef)
+ register(CgroupsPathError, rfc2119.Must, cgroupsPathRef)
+ register(DevicesApplyInOrder, rfc2119.Must, deviceWhitelistRef)
+ register(BlkIOWeightOrLeafWeightExist, rfc2119.Must, blockIoRef)
+ register(IntelRdtPIDWrite, rfc2119.Must, intelrdtRef)
+ register(IntelRdtNoMountedResctrlError, rfc2119.Must, intelrdtRef)
+ register(NotManipResctrlWithoutIntelRdt, rfc2119.Must, intelrdtRef)
+ register(IntelRdtL3CacheSchemaWrite, rfc2119.Must, intelrdtRef)
+ register(IntelRdtL3CacheSchemaNotWrite, rfc2119.Must, intelrdtRef)
+ register(SeccSyscallsNamesRequired, rfc2119.Must, seccompRef)
+ register(MaskedPathsAbs, rfc2119.Must, maskedPathsRef)
+ register(ReadonlyPathsAbs, rfc2119.Must, readonlyPathsRef)
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/config-windows.go b/vendor/github.com/opencontainers/runtime-tools/specerror/config-windows.go
new file mode 100644
index 000000000..58765286b
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/specerror/config-windows.go
@@ -0,0 +1,32 @@
+package specerror
+
+import (
+ "fmt"
+
+ rfc2119 "github.com/opencontainers/runtime-tools/error"
+)
+
+// define error codes
+const (
+ // WindowsLayerFoldersRequired represents "`layerFolders` MUST contain at least one entry."
+ WindowsLayerFoldersRequired = "`layerFolders` MUST contain at least one entry."
+ // WindowsHyperVPresent represents "If present, the container MUST be run with Hyper-V isolation."
+ WindowsHyperVPresent = "If present, the container MUST be run with Hyper-V isolation."
+ // WindowsHyperVOmit represents "If omitted, the container MUST be run as a Windows Server container."
+ WindowsHyperVOmit = "If omitted, the container MUST be run as a Windows Server container."
+)
+
+var (
+ layerfoldersRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-windows.md#layerfolders"), nil
+ }
+ hypervRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config-windows.md#hyperv"), nil
+ }
+)
+
+func init() {
+ register(WindowsLayerFoldersRequired, rfc2119.Must, layerfoldersRef)
+ register(WindowsHyperVPresent, rfc2119.Must, hypervRef)
+ register(WindowsHyperVOmit, rfc2119.Must, hypervRef)
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/config.go b/vendor/github.com/opencontainers/runtime-tools/specerror/config.go
new file mode 100644
index 000000000..e59b459c1
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/specerror/config.go
@@ -0,0 +1,188 @@
+package specerror
+
+import (
+ "fmt"
+
+ rfc2119 "github.com/opencontainers/runtime-tools/error"
+)
+
+// define error codes
+const (
+ // SpecVersionInSemVer represents "`ociVersion` (string, REQUIRED) MUST be in SemVer v2.0.0 format and specifies the version of the Open Container Initiative Runtime Specification with which the bundle complies."
+ SpecVersionInSemVer = "`ociVersion` (string, REQUIRED) MUST be in SemVer v2.0.0 format and specifies the version of the Open Container Initiative Runtime Specification with which the bundle complies."
+ // RootOnWindowsRequired represents "On Windows, for Windows Server Containers, this field is REQUIRED."
+ RootOnWindowsRequired = "On Windows, for Windows Server Containers, this field is REQUIRED."
+ // RootOnHyperVNotSet represents "For Hyper-V Containers, this field MUST NOT be set."
+ RootOnHyperVNotSet = "For Hyper-V Containers, this field MUST NOT be set."
+ // RootOnNonHyperVRequired represents "On all other platforms, this field is REQUIRED."
+ RootOnNonHyperVRequired = "On all other platforms, this field is REQUIRED."
+ // RootPathOnWindowsGUID represents "On Windows, `path` MUST be a volume GUID path."
+ RootPathOnWindowsGUID = "On Windows, `path` MUST be a volume GUID path."
+ // RootPathOnPosixConvention represents "The value SHOULD be the conventional `rootfs`."
+ RootPathOnPosixConvention = "The value SHOULD be the conventional `rootfs`."
+ // RootPathExist represents "A directory MUST exist at the path declared by the field."
+ RootPathExist = "A directory MUST exist at the path declared by the field."
+ // RootReadonlyImplement represents "`readonly` (bool, OPTIONAL) If true then the root filesystem MUST be read-only inside the container, defaults to false."
+ RootReadonlyImplement = "`readonly` (bool, OPTIONAL) If true then the root filesystem MUST be read-only inside the container, defaults to false."
+	// RootReadonlyOnWindowsFalse represents "On Windows, this field MUST be omitted or false."
+ RootReadonlyOnWindowsFalse = "On Windows, this field MUST be omitted or false."
+ // MountsInOrder represents "The runtime MUST mount entries in the listed order."
+ MountsInOrder = "The runtime MUST mount entries in the listed order."
+ // MountsDestAbs represents "Destination of mount point: path inside container. This value MUST be an absolute path."
+ MountsDestAbs = "Destination of mount point: path inside container. This value MUST be an absolute path."
+ // MountsDestOnWindowsNotNested represents "Windows: one mount destination MUST NOT be nested within another mount (e.g., c:\\foo and c:\\foo\\bar)."
+ MountsDestOnWindowsNotNested = "Windows: one mount destination MUST NOT be nested within another mount (e.g., c:\\foo and c:\\foo\\bar)."
+ // MountsOptionsOnWindowsROSupport represents "Windows: runtimes MUST support `ro`, mounting the filesystem read-only when `ro` is given."
+ MountsOptionsOnWindowsROSupport = "Windows: runtimes MUST support `ro`, mounting the filesystem read-only when `ro` is given."
+ // ProcRequiredAtStart represents "This property is REQUIRED when `start` is called."
+ ProcRequiredAtStart = "This property is REQUIRED when `start` is called."
+ // ProcConsoleSizeIgnore represents "Runtimes MUST ignore `consoleSize` if `terminal` is `false` or unset."
+ ProcConsoleSizeIgnore = "Runtimes MUST ignore `consoleSize` if `terminal` is `false` or unset."
+ // ProcCwdAbs represents "cwd (string, REQUIRED) is the working directory that will be set for the executable. This value MUST be an absolute path."
+ ProcCwdAbs = "cwd (string, REQUIRED) is the working directory that will be set for the executable. This value MUST be an absolute path."
+ // ProcArgsOneEntryRequired represents "This specification extends the IEEE standard in that at least one entry is REQUIRED, and that entry is used with the same semantics as `execvp`'s *file*."
+ ProcArgsOneEntryRequired = "This specification extends the IEEE standard in that at least one entry is REQUIRED, and that entry is used with the same semantics as `execvp`'s *file*."
+ // PosixProcRlimitsTypeGenError represents "The runtime MUST generate an error for any values which cannot be mapped to a relevant kernel interface."
+ PosixProcRlimitsTypeGenError = "The runtime MUST generate an error for any values which cannot be mapped to a relevant kernel interface."
+ // PosixProcRlimitsTypeGet represents "For each entry in `rlimits`, a `getrlimit(3)` on `type` MUST succeed."
+ PosixProcRlimitsTypeGet = "For each entry in `rlimits`, a `getrlimit(3)` on `type` MUST succeed."
+ // PosixProcRlimitsTypeValueError represents "valid values are defined in the ... man page"
+ PosixProcRlimitsTypeValueError = "valid values are defined in the ... man page"
+ // PosixProcRlimitsSoftMatchCur represents "`rlim.rlim_cur` MUST match the configured value."
+ PosixProcRlimitsSoftMatchCur = "`rlim.rlim_cur` MUST match the configured value."
+ // PosixProcRlimitsHardMatchMax represents "`rlim.rlim_max` MUST match the configured value."
+ PosixProcRlimitsHardMatchMax = "`rlim.rlim_max` MUST match the configured value."
+ // PosixProcRlimitsErrorOnDup represents "If `rlimits` contains duplicated entries with same `type`, the runtime MUST generate an error."
+ PosixProcRlimitsErrorOnDup = "If `rlimits` contains duplicated entries with same `type`, the runtime MUST generate an error."
+ // LinuxProcCapError represents "Any value which cannot be mapped to a relevant kernel interface MUST cause an error."
+ LinuxProcCapError = "Any value which cannot be mapped to a relevant kernel interface MUST cause an error."
+ // LinuxProcOomScoreAdjSet represents "If `oomScoreAdj` is set, the runtime MUST set `oom_score_adj` to the given value."
+ LinuxProcOomScoreAdjSet = "If `oomScoreAdj` is set, the runtime MUST set `oom_score_adj` to the given value."
+ // LinuxProcOomScoreAdjNotSet represents "If `oomScoreAdj` is not set, the runtime MUST NOT change the value of `oom_score_adj`."
+ LinuxProcOomScoreAdjNotSet = "If `oomScoreAdj` is not set, the runtime MUST NOT change the value of `oom_score_adj`."
+ // PlatformSpecConfOnWindowsSet represents "This MUST be set if the target platform of this spec is `windows`."
+ PlatformSpecConfOnWindowsSet = "This MUST be set if the target platform of this spec is `windows`."
+ // PosixHooksPathAbs represents "This specification extends the IEEE standard in that `path` MUST be absolute."
+ PosixHooksPathAbs = "This specification extends the IEEE standard in that `path` MUST be absolute."
+ // PosixHooksTimeoutPositive represents "If set, `timeout` MUST be greater than zero."
+ PosixHooksTimeoutPositive = "If set, `timeout` MUST be greater than zero."
+ // PosixHooksCalledInOrder represents "Hooks MUST be called in the listed order."
+ PosixHooksCalledInOrder = "Hooks MUST be called in the listed order."
+ // PosixHooksStateToStdin represents "The state of the container MUST be passed to hooks over stdin so that they may do work appropriate to the current state of the container."
+ PosixHooksStateToStdin = "The state of the container MUST be passed to hooks over stdin so that they may do work appropriate to the current state of the container."
+ // PrestartTiming represents "The pre-start hooks MUST be called after the `start` operation is called but before the user-specified program command is executed."
+ PrestartTiming = "The pre-start hooks MUST be called after the `start` operation is called but before the user-specified program command is executed."
+ // PoststartTiming represents "The post-start hooks MUST be called after the user-specified process is executed but before the `start` operation returns."
+ PoststartTiming = "The post-start hooks MUST be called after the user-specified process is executed but before the `start` operation returns."
+ // PoststopTiming represents "The post-stop hooks MUST be called after the container is deleted but before the `delete` operation returns."
+ PoststopTiming = "The post-stop hooks MUST be called after the container is deleted but before the `delete` operation returns."
+ // AnnotationsKeyValueMap represents "Annotations MUST be a key-value map."
+ AnnotationsKeyValueMap = "Annotations MUST be a key-value map."
+ // AnnotationsKeyString represents "Keys MUST be strings."
+ AnnotationsKeyString = "Keys MUST be strings."
+ // AnnotationsKeyRequired represents "Keys MUST NOT be an empty string."
+ AnnotationsKeyRequired = "Keys MUST NOT be an empty string."
+ // AnnotationsKeyReversedDomain represents "Keys SHOULD be named using a reverse domain notation - e.g. `com.example.myKey`."
+ AnnotationsKeyReversedDomain = "Keys SHOULD be named using a reverse domain notation - e.g. `com.example.myKey`."
+ // AnnotationsKeyReservedNS represents "Keys using the `org.opencontainers` namespace are reserved and MUST NOT be used by subsequent specifications."
+ AnnotationsKeyReservedNS = "Keys using the `org.opencontainers` namespace are reserved and MUST NOT be used by subsequent specifications."
+ // AnnotationsKeyIgnoreUnknown represents "Implementations that are reading/processing this configuration file MUST NOT generate an error if they encounter an unknown annotation key."
+ AnnotationsKeyIgnoreUnknown = "Implementations that are reading/processing this configuration file MUST NOT generate an error if they encounter an unknown annotation key."
+ // AnnotationsValueString represents "Values MUST be strings."
+ AnnotationsValueString = "Values MUST be strings."
+ // ExtensibilityIgnoreUnknownProp represents "Runtimes that are reading or processing this configuration file MUST NOT generate an error if they encounter an unknown property."
+ ExtensibilityIgnoreUnknownProp = "Runtimes that are reading or processing this configuration file MUST NOT generate an error if they encounter an unknown property.\nInstead they MUST ignore unknown properties."
+ // ValidValues represents "Runtimes that are reading or processing this configuration file MUST generate an error when invalid or unsupported values are encountered."
+ ValidValues = "Runtimes that are reading or processing this configuration file MUST generate an error when invalid or unsupported values are encountered."
+)
+
+var (
+ specificationVersionRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#specification-version"), nil
+ }
+ rootRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#root"), nil
+ }
+ mountsRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#mounts"), nil
+ }
+ processRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#process"), nil
+ }
+ posixProcessRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#posix-process"), nil
+ }
+ linuxProcessRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#linux-process"), nil
+ }
+ platformSpecificConfigurationRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#platform-specific-configuration"), nil
+ }
+ posixPlatformHooksRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#posix-platform-hooks"), nil
+ }
+ prestartRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#prestart"), nil
+ }
+ poststartRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#poststart"), nil
+ }
+ poststopRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#poststop"), nil
+ }
+ annotationsRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#annotations"), nil
+ }
+ extensibilityRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#extensibility"), nil
+ }
+ validValuesRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "config.md#valid-values"), nil
+ }
+)
+
+func init() {
+ register(SpecVersionInSemVer, rfc2119.Must, specificationVersionRef)
+ register(RootOnWindowsRequired, rfc2119.Required, rootRef)
+ register(RootOnHyperVNotSet, rfc2119.Must, rootRef)
+ register(RootOnNonHyperVRequired, rfc2119.Required, rootRef)
+ register(RootPathOnWindowsGUID, rfc2119.Must, rootRef)
+ register(RootPathOnPosixConvention, rfc2119.Should, rootRef)
+ register(RootPathExist, rfc2119.Must, rootRef)
+ register(RootReadonlyImplement, rfc2119.Must, rootRef)
+ register(RootReadonlyOnWindowsFalse, rfc2119.Must, rootRef)
+ register(MountsInOrder, rfc2119.Must, mountsRef)
+ register(MountsDestAbs, rfc2119.Must, mountsRef)
+ register(MountsDestOnWindowsNotNested, rfc2119.Must, mountsRef)
+ register(MountsOptionsOnWindowsROSupport, rfc2119.Must, mountsRef)
+ register(ProcRequiredAtStart, rfc2119.Required, processRef)
+ register(ProcConsoleSizeIgnore, rfc2119.Must, processRef)
+ register(ProcCwdAbs, rfc2119.Must, processRef)
+ register(ProcArgsOneEntryRequired, rfc2119.Required, processRef)
+ register(PosixProcRlimitsTypeGenError, rfc2119.Must, posixProcessRef)
+ register(PosixProcRlimitsTypeGet, rfc2119.Must, posixProcessRef)
+ register(PosixProcRlimitsTypeValueError, rfc2119.Should, posixProcessRef)
+ register(PosixProcRlimitsSoftMatchCur, rfc2119.Must, posixProcessRef)
+ register(PosixProcRlimitsHardMatchMax, rfc2119.Must, posixProcessRef)
+ register(PosixProcRlimitsErrorOnDup, rfc2119.Must, posixProcessRef)
+ register(LinuxProcCapError, rfc2119.Must, linuxProcessRef)
+ register(LinuxProcOomScoreAdjSet, rfc2119.Must, linuxProcessRef)
+ register(LinuxProcOomScoreAdjNotSet, rfc2119.Must, linuxProcessRef)
+ register(PlatformSpecConfOnWindowsSet, rfc2119.Must, platformSpecificConfigurationRef)
+ register(PosixHooksPathAbs, rfc2119.Must, posixPlatformHooksRef)
+ register(PosixHooksTimeoutPositive, rfc2119.Must, posixPlatformHooksRef)
+ register(PosixHooksCalledInOrder, rfc2119.Must, posixPlatformHooksRef)
+ register(PosixHooksStateToStdin, rfc2119.Must, posixPlatformHooksRef)
+ register(PrestartTiming, rfc2119.Must, prestartRef)
+ register(PoststartTiming, rfc2119.Must, poststartRef)
+ register(PoststopTiming, rfc2119.Must, poststopRef)
+ register(AnnotationsKeyValueMap, rfc2119.Must, annotationsRef)
+ register(AnnotationsKeyString, rfc2119.Must, annotationsRef)
+ register(AnnotationsKeyRequired, rfc2119.Must, annotationsRef)
+ register(AnnotationsKeyReversedDomain, rfc2119.Should, annotationsRef)
+ register(AnnotationsKeyReservedNS, rfc2119.Must, annotationsRef)
+ register(AnnotationsKeyIgnoreUnknown, rfc2119.Must, annotationsRef)
+ register(AnnotationsValueString, rfc2119.Must, annotationsRef)
+ register(ExtensibilityIgnoreUnknownProp, rfc2119.Must, extensibilityRef)
+ register(ValidValues, rfc2119.Must, validValuesRef)
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/error.go b/vendor/github.com/opencontainers/runtime-tools/specerror/error.go
index c75bb6b14..1cfe054c2 100644
--- a/vendor/github.com/opencontainers/runtime-tools/specerror/error.go
+++ b/vendor/github.com/opencontainers/runtime-tools/specerror/error.go
@@ -13,46 +13,13 @@ const referenceTemplate = "https://github.com/opencontainers/runtime-spec/blob/v
// Code represents the spec violation, enumerating both
// configuration violations and runtime violations.
-type Code int
+type Code string
const (
// NonError represents that an input is not an error
- NonError Code = iota
+ NonError = "the input is not an error"
// NonRFCError represents that an error is not a rfc2119 error
- NonRFCError
-
- // ConfigFileExistence represents the error code of 'config.json' existence test
- ConfigFileExistence
- // ArtifactsInSingleDir represents the error code of artifacts place test
- ArtifactsInSingleDir
-
- // SpecVersion represents the error code of specfication version test
- SpecVersion
-
- // RootOnNonHyperV represents the error code of root setting test on non hyper-v containers
- RootOnNonHyperV
- // RootOnHyperV represents the error code of root setting test on hyper-v containers
- RootOnHyperV
- // PathFormatOnWindows represents the error code of the path format test on Window
- PathFormatOnWindows
- // PathName represents the error code of the path name test
- PathName
- // PathExistence represents the error code of the path existence test
- PathExistence
- // ReadonlyFilesystem represents the error code of readonly test
- ReadonlyFilesystem
- // ReadonlyOnWindows represents the error code of readonly setting test on Windows
- ReadonlyOnWindows
-
- // DefaultFilesystems represents the error code of default filesystems test
- DefaultFilesystems
-
- // CreateWithID represents the error code of 'create' lifecyle test with 'id' provided
- CreateWithID
- // CreateWithUniqueID represents the error code of 'create' lifecyle test with unique 'id' provided
- CreateWithUniqueID
- // CreateNewContainer represents the error code 'create' lifecyle test that creates new container
- CreateNewContainer
+ NonRFCError = "the error is not a rfc2119 error"
)
type errorTemplate struct {
@@ -69,52 +36,24 @@ type Error struct {
Code Code
}
-var (
- containerFormatRef = func(version string) (reference string, err error) {
- return fmt.Sprintf(referenceTemplate, version, "bundle.md#container-format"), nil
- }
- specVersionRef = func(version string) (reference string, err error) {
- return fmt.Sprintf(referenceTemplate, version, "config.md#specification-version"), nil
- }
- rootRef = func(version string) (reference string, err error) {
- return fmt.Sprintf(referenceTemplate, version, "config.md#root"), nil
- }
- defaultFSRef = func(version string) (reference string, err error) {
- return fmt.Sprintf(referenceTemplate, version, "config-linux.md#default-filesystems"), nil
- }
- runtimeCreateRef = func(version string) (reference string, err error) {
- return fmt.Sprintf(referenceTemplate, version, "runtime.md#create"), nil
+// LevelErrors represents Errors filtered into fatal and warnings.
+type LevelErrors struct {
+ // Warnings holds Errors that were below a compliance-level threshold.
+ Warnings []*Error
+
+ // Error holds errors that were at or above a compliance-level
+	// threshold, as well as plain errors that are not specerror Errors.
+ Error *multierror.Error
+}
+
+var ociErrors = map[Code]errorTemplate{}
+
+func register(code Code, level rfc2119.Level, ref func(version string) (string, error)) {
+	if _, ok := ociErrors[code]; ok {
+		panic(fmt.Sprintf("should not register the same code twice: %s", code))
}
-)
-var ociErrors = map[Code]errorTemplate{
- // Bundle.md
- // Container Format
- ConfigFileExistence: {Level: rfc2119.Must, Reference: containerFormatRef},
- ArtifactsInSingleDir: {Level: rfc2119.Must, Reference: containerFormatRef},
-
- // Config.md
- // Specification Version
- SpecVersion: {Level: rfc2119.Must, Reference: specVersionRef},
- // Root
- RootOnNonHyperV: {Level: rfc2119.Required, Reference: rootRef},
- RootOnHyperV: {Level: rfc2119.Must, Reference: rootRef},
- // TODO: add tests for 'PathFormatOnWindows'
- PathFormatOnWindows: {Level: rfc2119.Must, Reference: rootRef},
- PathName: {Level: rfc2119.Should, Reference: rootRef},
- PathExistence: {Level: rfc2119.Must, Reference: rootRef},
- ReadonlyFilesystem: {Level: rfc2119.Must, Reference: rootRef},
- ReadonlyOnWindows: {Level: rfc2119.Must, Reference: rootRef},
-
- // Config-Linux.md
- // Default Filesystems
- DefaultFilesystems: {Level: rfc2119.Should, Reference: defaultFSRef},
-
- // Runtime.md
- // Create
- CreateWithID: {Level: rfc2119.Must, Reference: runtimeCreateRef},
- CreateWithUniqueID: {Level: rfc2119.Must, Reference: runtimeCreateRef},
- CreateNewContainer: {Level: rfc2119.Must, Reference: runtimeCreateRef},
+ ociErrors[code] = errorTemplate{Level: level, Reference: ref}
}
// Error returns the error message with specification reference.
@@ -168,3 +107,23 @@ func FindError(err error, code Code) Code {
}
return NonRFCError
}
+
+// SplitLevel partitions the RFC 2119 errors in the source error: errors
+// below 'level' are collected into Warnings, while the rest (and any
+// entries that are not Errors) are kept in Error. If the source error
+// is not a multierror, it is returned unchanged.
+func SplitLevel(errIn error, level rfc2119.Level) (levelErrors LevelErrors, errOut error) {
+ merr, ok := errIn.(*multierror.Error)
+ if !ok {
+ return levelErrors, errIn
+ }
+ for _, err := range merr.Errors {
+ e, ok := err.(*Error)
+ if ok && e.Err.Level < level {
+ levelErrors.Warnings = append(levelErrors.Warnings, e)
+ continue
+ }
+ levelErrors.Error = multierror.Append(levelErrors.Error, err)
+ }
+ return levelErrors, nil
+}
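
The new SplitLevel entry point lets callers downgrade sub-threshold spec violations to warnings instead of failing outright. A sketch of a possible caller, assuming the vendored validate, specerror, rfc2119, and logrus packages seen elsewhere in this diff (checkCompliance is a hypothetical helper):

import (
	rfc2119 "github.com/opencontainers/runtime-tools/error"
	"github.com/opencontainers/runtime-tools/specerror"
	"github.com/opencontainers/runtime-tools/validate"
	"github.com/sirupsen/logrus"
)

// checkCompliance is a hypothetical wrapper: violations below the MUST
// level are logged as warnings; everything else is returned as fatal.
func checkCompliance(v *validate.Validator) error {
	levelErrors, err := specerror.SplitLevel(v.CheckAll(), rfc2119.Must)
	if err != nil {
		return err // not a multierror, so there was nothing to split
	}
	for _, w := range levelErrors.Warnings {
		logrus.Warn(w.Error())
	}
	return levelErrors.Error.ErrorOrNil()
}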
diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/runtime-linux.go b/vendor/github.com/opencontainers/runtime-tools/specerror/runtime-linux.go
new file mode 100644
index 000000000..3ce7c3ed4
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/specerror/runtime-linux.go
@@ -0,0 +1,23 @@
+package specerror
+
+import (
+ "fmt"
+
+ rfc2119 "github.com/opencontainers/runtime-tools/error"
+)
+
+// define error codes
+const (
+	// DefaultRuntimeLinuxSymlinks represents "While creating the container (step 2 in the lifecycle), runtimes MUST create the default symlinks if the source file exists after processing `mounts`."
+ DefaultRuntimeLinuxSymlinks = "While creating the container (step 2 in the lifecycle), runtimes MUST create the default symlinks if the source file exists after processing `mounts`."
+)
+
+var (
+ devSymbolicLinksRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime-linux.md#dev-symbolic-links"), nil
+ }
+)
+
+func init() {
+ register(DefaultRuntimeLinuxSymlinks, rfc2119.Must, devSymbolicLinksRef)
+}
diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/runtime.go b/vendor/github.com/opencontainers/runtime-tools/specerror/runtime.go
new file mode 100644
index 000000000..7552b3c84
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-tools/specerror/runtime.go
@@ -0,0 +1,179 @@
+package specerror
+
+import (
+ "fmt"
+
+ rfc2119 "github.com/opencontainers/runtime-tools/error"
+)
+
+// define error codes
+const (
+ // EntityOperSameContainer represents "The entity using a runtime to create a container MUST be able to use the operations defined in this specification against that same container."
+ EntityOperSameContainer = "The entity using a runtime to create a container MUST be able to use the operations defined in this specification against that same container."
+ // StateIDUniq represents "`id` (string, REQUIRED) is the container's ID. This MUST be unique across all containers on this host."
+ StateIDUniq = "`id` (string, REQUIRED) is the container's ID. This MUST be unique across all containers on this host."
+ // StateNewStatus represents "Additional values MAY be defined by the runtime, however, they MUST be used to represent new runtime states not defined above."
+ StateNewStatus = "Additional values MAY be defined by the runtime, however, they MUST be used to represent new runtime states not defined above."
+ // DefaultStateJSONPattern represents "When serialized in JSON, the format MUST adhere to the default pattern."
+ DefaultStateJSONPattern = "When serialized in JSON, the format MUST adhere to the default pattern."
+ // EnvCreateImplement represents "The container's runtime environment MUST be created according to the configuration in `config.json`."
+ EnvCreateImplement = "The container's runtime environment MUST be created according to the configuration in `config.json`."
+ // EnvCreateError represents "If the runtime is unable to create the environment specified in the `config.json`, it MUST generate an error."
+	EnvCreateError = "If the runtime is unable to create the environment specified in the `config.json`, it MUST generate an error."
+ // ProcNotRunAtResRequest represents "While the resources requested in the `config.json` MUST be created, the user-specified program (from `process`) MUST NOT be run at this time."
+ ProcNotRunAtResRequest = "While the resources requested in the `config.json` MUST be created, the user-specified program (from `process`) MUST NOT be run at this time."
+ // ConfigUpdatesWithoutAffect represents "Any updates to `config.json` after this step MUST NOT affect the container."
+ ConfigUpdatesWithoutAffect = "Any updates to `config.json` after this step MUST NOT affect the container."
+ // PrestartHooksInvoke represents "The prestart hooks MUST be invoked by the runtime."
+ PrestartHooksInvoke = "The prestart hooks MUST be invoked by the runtime."
+ // PrestartHookFailGenError represents "If any prestart hook fails, the runtime MUST generate an error, stop the container, and continue the lifecycle at step 9."
+ PrestartHookFailGenError = "If any prestart hook fails, the runtime MUST generate an error, stop the container, and continue the lifecycle at step 9."
+ // ProcImplement represents "The runtime MUST run the user-specified program, as specified by `process`."
+ ProcImplement = "The runtime MUST run the user-specified program, as specified by `process`."
+ // PoststartHooksInvoke represents "The poststart hooks MUST be invoked by the runtime."
+ PoststartHooksInvoke = "The poststart hooks MUST be invoked by the runtime."
+ // PoststartHookFailGenWarn represents "If any poststart hook fails, the runtime MUST log a warning, but the remaining hooks and lifecycle continue as if the hook had succeeded."
+ PoststartHookFailGenWarn = "If any poststart hook fails, the runtime MUST log a warning, but the remaining hooks and lifecycle continue as if the hook had succeeded."
+ // UndoCreateSteps represents "The container MUST be destroyed by undoing the steps performed during create phase (step 2)."
+ UndoCreateSteps = "The container MUST be destroyed by undoing the steps performed during create phase (step 2)."
+ // PoststopHooksInvoke represents "The poststop hooks MUST be invoked by the runtime."
+ PoststopHooksInvoke = "The poststop hooks MUST be invoked by the runtime."
+ // PoststopHookFailGenWarn represents "If any poststop hook fails, the runtime MUST log a warning, but the remaining hooks and lifecycle continue as if the hook had succeeded."
+ PoststopHookFailGenWarn = "If any poststop hook fails, the runtime MUST log a warning, but the remaining hooks and lifecycle continue as if the hook had succeeded."
+ // ErrorsLeaveStateUnchange represents "Unless otherwise stated, generating an error MUST leave the state of the environment as if the operation were never attempted - modulo any possible trivial ancillary changes such as logging."
+ ErrorsLeaveStateUnchange = "Unless otherwise stated, generating an error MUST leave the state of the environment as if the operation were never attempted - modulo any possible trivial ancillary changes such as logging."
+ // WarnsLeaveFlowUnchange represents "Unless otherwise stated, logging a warning does not change the flow of the operation; it MUST continue as if the warning had not been logged."
+ WarnsLeaveFlowUnchange = "Unless otherwise stated, logging a warning does not change the flow of the operation; it MUST continue as if the warning had not been logged."
+ // DefaultOperations represents "Unless otherwise stated, runtimes MUST support the default operations."
+ DefaultOperations = "Unless otherwise stated, runtimes MUST support the default operations."
+ // QueryWithoutIDGenError represents "This operation MUST generate an error if it is not provided the ID of a container."
+ QueryWithoutIDGenError = "This operation MUST generate an error if it is not provided the ID of a container."
+ // QueryNonExistGenError represents "Attempting to query a container that does not exist MUST generate an error."
+ QueryNonExistGenError = "Attempting to query a container that does not exist MUST generate an error."
+ // QueryStateImplement represents "This operation MUST return the state of a container as specified in the State section."
+ QueryStateImplement = "This operation MUST return the state of a container as specified in the State section."
+ // CreateWithBundlePathAndID represents "This operation MUST generate an error if it is not provided a path to the bundle and the container ID to associate with the container."
+ CreateWithBundlePathAndID = "This operation MUST generate an error if it is not provided a path to the bundle and the container ID to associate with the container."
+ // CreateWithUniqueID represents "If the ID provided is not unique across all containers within the scope of the runtime, or is not valid in any other way, the implementation MUST generate an error and a new container MUST NOT be created."
+ CreateWithUniqueID = "If the ID provided is not unique across all containers within the scope of the runtime, or is not valid in any other way, the implementation MUST generate an error and a new container MUST NOT be created."
+ // CreateNewContainer represents "This operation MUST create a new container."
+ CreateNewContainer = "This operation MUST create a new container."
+ // PropsApplyExceptProcOnCreate represents "All of the properties configured in `config.json` except for `process` MUST be applied."
+ PropsApplyExceptProcOnCreate = "All of the properties configured in `config.json` except for `process` MUST be applied."
+	// ProcArgsApplyUntilStart represents "`process.args` MUST NOT be applied until triggered by the `start` operation."
+ ProcArgsApplyUntilStart = "`process.args` MUST NOT be applied until triggered by the `start` operation."
+ // PropApplyFailGenError represents "If the runtime cannot apply a property as specified in the configuration, it MUST generate an error."
+ PropApplyFailGenError = "If the runtime cannot apply a property as specified in the configuration, it MUST generate an error."
+ // PropApplyFailNotCreate represents "If the runtime cannot apply a property as specified in the configuration, a new container MUST NOT be created."
+ PropApplyFailNotCreate = "If the runtime cannot apply a property as specified in the configuration, a new container MUST NOT be created."
+ // StartWithoutIDGenError represents "`start` operation MUST generate an error if it is not provided the container ID."
+ StartWithoutIDGenError = "`start` operation MUST generate an error if it is not provided the container ID."
+ // StartNonCreateHaveNoEffect represents "Attempting to `start` a container that is not `created` MUST have no effect on the container."
+ StartNonCreateHaveNoEffect = "Attempting to `start` a container that is not `created` MUST have no effect on the container."
+ // StartNonCreateGenError represents "Attempting to `start` a container that is not `created` MUST generate an error."
+ StartNonCreateGenError = "Attempting to `start` a container that is not `created` MUST generate an error."
+ // StartProcImplement represents "`start` operation MUST run the user-specified program as specified by `process`."
+ StartProcImplement = "`start` operation MUST run the user-specified program as specified by `process`."
+ // StartWithProcUnsetGenError represents "`start` operation MUST generate an error if `process` was not set."
+ StartWithProcUnsetGenError = "`start` operation MUST generate an error if `process` was not set."
+ // KillWithoutIDGenError represents "`kill` operation MUST generate an error if it is not provided the container ID."
+ KillWithoutIDGenError = "`kill` operation MUST generate an error if it is not provided the container ID."
+ // KillNonCreateRunHaveNoEffect represents "Attempting to send a signal to a container that is neither `created` nor `running` MUST have no effect on the container."
+ KillNonCreateRunHaveNoEffect = "Attempting to send a signal to a container that is neither `created` nor `running` MUST have no effect on the container."
+ // KillNonCreateRunGenError represents "Attempting to send a signal to a container that is neither `created` nor `running` MUST generate an error."
+ KillNonCreateRunGenError = "Attempting to send a signal to a container that is neither `created` nor `running` MUST generate an error."
+ // KillSignalImplement represents "`kill` operation MUST send the specified signal to the container process."
+ KillSignalImplement = "`kill` operation MUST send the specified signal to the container process."
+ // DeleteWithoutIDGenError represents "`delete` operation MUST generate an error if it is not provided the container ID."
+ DeleteWithoutIDGenError = "`delete` operation MUST generate an error if it is not provided the container ID."
+ // DeleteNonStopHaveNoEffect represents "Attempting to `delete` a container that is not `stopped` MUST have no effect on the container."
+ DeleteNonStopHaveNoEffect = "Attempting to `delete` a container that is not `stopped` MUST have no effect on the container."
+ // DeleteNonStopGenError represents "Attempting to `delete` a container that is not `stopped` MUST generate an error."
+ DeleteNonStopGenError = "Attempting to `delete` a container that is not `stopped` MUST generate an error."
+ // DeleteResImplement represents "Deleting a container MUST delete the resources that were created during the `create` step."
+ DeleteResImplement = "Deleting a container MUST delete the resources that were created during the `create` step."
+ // DeleteOnlyCreatedRes represents "Note that resources associated with the container, but not created by this container, MUST NOT be deleted."
+ DeleteOnlyCreatedRes = "Note that resources associated with the container, but not created by this container, MUST NOT be deleted."
+)
+
+var (
+ scopeOfAContainerRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#scope-of-a-container"), nil
+ }
+ stateRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#state"), nil
+ }
+ lifecycleRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#lifecycle"), nil
+ }
+ errorsRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#errors"), nil
+ }
+ warningsRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#warnings"), nil
+ }
+ operationsRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#operations"), nil
+ }
+ queryStateRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#query-state"), nil
+ }
+ createRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#create"), nil
+ }
+ startRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#start"), nil
+ }
+ killRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#kill"), nil
+ }
+ deleteRef = func(version string) (reference string, err error) {
+ return fmt.Sprintf(referenceTemplate, version, "runtime.md#delete"), nil
+ }
+)
+
+func init() {
+ register(EntityOperSameContainer, rfc2119.Must, scopeOfAContainerRef)
+ register(StateIDUniq, rfc2119.Must, stateRef)
+ register(StateNewStatus, rfc2119.Must, stateRef)
+ register(DefaultStateJSONPattern, rfc2119.Must, stateRef)
+ register(EnvCreateImplement, rfc2119.Must, lifecycleRef)
+ register(EnvCreateError, rfc2119.Must, lifecycleRef)
+ register(ProcNotRunAtResRequest, rfc2119.Must, lifecycleRef)
+ register(ConfigUpdatesWithoutAffect, rfc2119.Must, lifecycleRef)
+ register(PrestartHooksInvoke, rfc2119.Must, lifecycleRef)
+ register(PrestartHookFailGenError, rfc2119.Must, lifecycleRef)
+ register(ProcImplement, rfc2119.Must, lifecycleRef)
+ register(PoststartHooksInvoke, rfc2119.Must, lifecycleRef)
+ register(PoststartHookFailGenWarn, rfc2119.Must, lifecycleRef)
+ register(UndoCreateSteps, rfc2119.Must, lifecycleRef)
+ register(PoststopHooksInvoke, rfc2119.Must, lifecycleRef)
+ register(PoststopHookFailGenWarn, rfc2119.Must, lifecycleRef)
+ register(ErrorsLeaveStateUnchange, rfc2119.Must, errorsRef)
+ register(WarnsLeaveFlowUnchange, rfc2119.Must, warningsRef)
+ register(DefaultOperations, rfc2119.Must, operationsRef)
+ register(QueryWithoutIDGenError, rfc2119.Must, queryStateRef)
+ register(QueryNonExistGenError, rfc2119.Must, queryStateRef)
+ register(QueryStateImplement, rfc2119.Must, queryStateRef)
+ register(CreateWithBundlePathAndID, rfc2119.Must, createRef)
+ register(CreateWithUniqueID, rfc2119.Must, createRef)
+ register(CreateNewContainer, rfc2119.Must, createRef)
+ register(PropsApplyExceptProcOnCreate, rfc2119.Must, createRef)
+ register(ProcArgsApplyUntilStart, rfc2119.Must, createRef)
+ register(PropApplyFailGenError, rfc2119.Must, createRef)
+ register(PropApplyFailNotCreate, rfc2119.Must, createRef)
+ register(StartWithoutIDGenError, rfc2119.Must, startRef)
+ register(StartNonCreateHaveNoEffect, rfc2119.Must, startRef)
+ register(StartNonCreateGenError, rfc2119.Must, startRef)
+ register(StartProcImplement, rfc2119.Must, startRef)
+ register(StartWithProcUnsetGenError, rfc2119.Must, startRef)
+ register(KillWithoutIDGenError, rfc2119.Must, killRef)
+ register(KillNonCreateRunHaveNoEffect, rfc2119.Must, killRef)
+ register(KillNonCreateRunGenError, rfc2119.Must, killRef)
+ register(KillSignalImplement, rfc2119.Must, killRef)
+ register(DeleteWithoutIDGenError, rfc2119.Must, deleteRef)
+ register(DeleteNonStopHaveNoEffect, rfc2119.Must, deleteRef)
+ register(DeleteNonStopGenError, rfc2119.Must, deleteRef)
+ register(DeleteResImplement, rfc2119.Must, deleteRef)
+ register(DeleteOnlyCreatedRes, rfc2119.Must, deleteRef)
+}
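
For context, these registered codes surface through specerror.NewError, the same call the validate.go hunks below use. A minimal sketch of wrapping a validation failure with one of the codes registered above (the failing cause is invented for illustration):

    package main

    import (
    	"fmt"

    	rspec "github.com/opencontainers/runtime-spec/specs-go"
    	"github.com/opencontainers/runtime-tools/specerror"
    )

    func main() {
    	// Hypothetical finding: delete removed a resource it did not create.
    	cause := fmt.Errorf("removed pre-existing cgroup for container %q", "app1")
    	// Attach the registered spec requirement and the spec version.
    	err := specerror.NewError(specerror.DeleteOnlyCreatedRes, cause, rspec.Version)
    	fmt.Println(err)
    }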
diff --git a/vendor/github.com/opencontainers/runtime-tools/validate/validate.go b/vendor/github.com/opencontainers/runtime-tools/validate/validate.go
index bbdb29c60..0914bc691 100644
--- a/vendor/github.com/opencontainers/runtime-tools/validate/validate.go
+++ b/vendor/github.com/opencontainers/runtime-tools/validate/validate.go
@@ -20,33 +20,41 @@ import (
"github.com/blang/semver"
"github.com/hashicorp/go-multierror"
rspec "github.com/opencontainers/runtime-spec/specs-go"
+ osFilepath "github.com/opencontainers/runtime-tools/filepath"
"github.com/sirupsen/logrus"
"github.com/syndtr/gocapability/capability"
"github.com/opencontainers/runtime-tools/specerror"
+ "github.com/xeipuuv/gojsonschema"
)
const specConfig = "config.json"
var (
- defaultRlimits = []string{
+ // http://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
+ posixRlimits = []string{
"RLIMIT_AS",
"RLIMIT_CORE",
"RLIMIT_CPU",
"RLIMIT_DATA",
"RLIMIT_FSIZE",
- "RLIMIT_LOCKS",
+ "RLIMIT_NOFILE",
+ "RLIMIT_STACK",
+ }
+
+ // https://git.kernel.org/pub/scm/docs/man-pages/man-pages.git/tree/man2/getrlimit.2?h=man-pages-4.13
+ linuxRlimits = append(posixRlimits, []string{
"RLIMIT_MEMLOCK",
"RLIMIT_MSGQUEUE",
"RLIMIT_NICE",
- "RLIMIT_NOFILE",
"RLIMIT_NPROC",
"RLIMIT_RSS",
"RLIMIT_RTPRIO",
"RLIMIT_RTTIME",
"RLIMIT_SIGPENDING",
- "RLIMIT_STACK",
- }
+ }...)
+
+ configSchemaTemplate = "https://raw.githubusercontent.com/opencontainers/runtime-spec/v%s/schema/config-schema.json"
)
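
The reshuffle above replaces one flat defaultRlimits list with a POSIX base plus Linux extensions; the append is safe because a slice literal's capacity equals its length, so linuxRlimits gets its own backing array rather than aliasing posixRlimits. A sketch of the membership split this produces (the contains helper is illustrative, not part of the diff):

    // contains reports whether list includes the given rlimit type.
    func contains(list []string, rlimitType string) bool {
    	for _, v := range list {
    		if v == rlimitType {
    			return true
    		}
    	}
    	return false
    }

    // With the lists declared above:
    //   contains(posixRlimits, "RLIMIT_NOFILE") // true: POSIX, hence also Linux
    //   contains(posixRlimits, "RLIMIT_NICE")   // false: Linux extension only
    //   contains(linuxRlimits, "RLIMIT_NICE")   // true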
// Validator represents a validator for runtime bundle
@@ -86,7 +94,7 @@ func NewValidatorFromPath(bundlePath string, hostSpecific bool, platform string)
configPath := filepath.Join(bundlePath, specConfig)
content, err := ioutil.ReadFile(configPath)
if err != nil {
- return Validator{}, specerror.NewError(specerror.ConfigFileExistence, err, rspec.Version)
+ return Validator{}, specerror.NewError(specerror.ConfigInRootBundleDir, err, rspec.Version)
}
if !utf8.Valid(content) {
return Validator{}, fmt.Errorf("%q is not encoded in UTF-8", configPath)
@@ -100,7 +108,9 @@ func NewValidatorFromPath(bundlePath string, hostSpecific bool, platform string)
}
// CheckAll checks all parts of runtime bundle
-func (v *Validator) CheckAll() (errs error) {
+func (v *Validator) CheckAll() error {
+ var errs *multierror.Error
+ errs = multierror.Append(errs, v.CheckJSONSchema())
errs = multierror.Append(errs, v.CheckPlatform())
errs = multierror.Append(errs, v.CheckRoot())
errs = multierror.Append(errs, v.CheckMandatoryFields())
@@ -110,7 +120,50 @@ func (v *Validator) CheckAll() (errs error) {
errs = multierror.Append(errs, v.CheckHooks())
errs = multierror.Append(errs, v.CheckLinux())
- return
+ return errs.ErrorOrNil()
+}
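
The signature change is not cosmetic: multierror.Append always returns a non-nil *multierror.Error, so the old CheckAll reported a non-nil error even when every check passed; ErrorOrNil collapses the empty case back to a true nil. A minimal demonstration with the go-multierror API already imported here:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/go-multierror"
    )

    func main() {
    	var errs *multierror.Error
    	// Appending nil (a passing check) records nothing.
    	errs = multierror.Append(errs, nil)
    	fmt.Println(errs.ErrorOrNil() == nil) // true: no failures were recorded
    }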
+
+// JSONSchemaURL returns the URL for the JSON Schema specifying the
+// configuration format. It consumes configSchemaTemplate, but we
+// provide it as a function to isolate consumers from inconsistent
+// naming as runtime-spec evolves.
+func JSONSchemaURL(version string) (url string, err error) {
+ ver, err := semver.Parse(version)
+ if err != nil {
+ return "", specerror.NewError(specerror.SpecVersionInSemVer, err, rspec.Version)
+ }
+ configRenamedToConfigSchemaVersion, err := semver.Parse("1.0.0-rc2") // config.json became config-schema.json in 1.0.0-rc2
+ if ver.Compare(configRenamedToConfigSchemaVersion) == -1 {
+ return "", fmt.Errorf("unsupported configuration version (older than %s)", configRenamedToConfigSchemaVersion)
+ }
+ return fmt.Sprintf(configSchemaTemplate, version), nil
+}
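
Given configSchemaTemplate above, the helper pins the schema URL to the configuration's declared version; for example (URL expanded from the template in this hunk):

    url, err := JSONSchemaURL("1.0.0")
    // err == nil; url ==
    //   "https://raw.githubusercontent.com/opencontainers/runtime-spec/v1.0.0/schema/config-schema.json"
    // Versions older than 1.0.0-rc2, or strings that are not SemVer, return an error instead.
    _, _ = url, err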
+
+// CheckJSONSchema validates the configuration against the
+// runtime-spec JSON Schema, using the version of the schema that
+// matches the configuration's declared version.
+func (v *Validator) CheckJSONSchema() (errs error) {
+ url, err := JSONSchemaURL(v.spec.Version)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ return errs
+ }
+
+ schemaLoader := gojsonschema.NewReferenceLoader(url)
+ documentLoader := gojsonschema.NewGoLoader(v.spec)
+ result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ return errs
+ }
+
+ if !result.Valid() {
+ for _, resultError := range result.Errors() {
+ errs = multierror.Append(errs, errors.New(resultError.String()))
+ }
+ }
+
+ return errs
}
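
The loader/validate calls above follow gojsonschema's standard pattern; the same flow works against an inline schema, which makes it easy to see what result.Errors() yields (the schema and document below are invented):

    package main

    import (
    	"fmt"

    	"github.com/xeipuuv/gojsonschema"
    )

    func main() {
    	schema := gojsonschema.NewStringLoader(`{"type": "object", "required": ["ociVersion"]}`)
    	document := gojsonschema.NewGoLoader(map[string]interface{}{"ociVersion": "1.0.0"})
    	result, err := gojsonschema.Validate(schema, document)
    	if err != nil {
    		panic(err) // the schema itself could not be loaded or compiled
    	}
    	for _, e := range result.Errors() {
    		fmt.Println(e.String()) // one line per violated constraint
    	}
    	fmt.Println("valid:", result.Valid())
    }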
// CheckRoot checks status of v.spec.Root
@@ -120,13 +173,30 @@ func (v *Validator) CheckRoot() (errs error) {
if v.platform == "windows" && v.spec.Windows != nil && v.spec.Windows.HyperV != nil {
if v.spec.Root != nil {
errs = multierror.Append(errs,
- specerror.NewError(specerror.RootOnHyperV, fmt.Errorf("for Hyper-V containers, Root must not be set"), rspec.Version))
+ specerror.NewError(specerror.RootOnHyperVNotSet, fmt.Errorf("for Hyper-V containers, Root must not be set"), rspec.Version))
return
}
return
} else if v.spec.Root == nil {
errs = multierror.Append(errs,
- specerror.NewError(specerror.RootOnNonHyperV, fmt.Errorf("for non-Hyper-V containers, Root must be set"), rspec.Version))
+ specerror.NewError(specerror.RootOnNonHyperVRequired, fmt.Errorf("for non-Hyper-V containers, Root must be set"), rspec.Version))
+ return
+ }
+
+ if v.platform == "windows" {
+ matched, err := regexp.MatchString(`\\\\[?]\\Volume[{][a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}[}]\\`, v.spec.Root.Path)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ } else if !matched {
+ errs = multierror.Append(errs,
+ specerror.NewError(specerror.RootPathOnWindowsGUID, fmt.Errorf("root.path is %q, but it MUST be a volume GUID path when target platform is windows", v.spec.Root.Path), rspec.Version))
+ }
+
+ if v.spec.Root.Readonly {
+ errs = multierror.Append(errs,
+ specerror.NewError(specerror.RootReadonlyOnWindowsFalse, fmt.Errorf("root.readonly field MUST be omitted or false when target platform is windows"), rspec.Version))
+ }
+
return
}
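
The regular expression above encodes the spec's Windows requirement that root.path be a volume GUID path; a quick check that a well-formed path matches (the GUID is invented):

    package main

    import (
    	"fmt"
    	"regexp"
    )

    func main() {
    	pattern := `\\\\[?]\\Volume[{][a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}[}]\\`
    	path := `\\?\Volume{ef66f93e-b0b4-4a86-91da-8e31bd6d9a42}\`
    	matched, err := regexp.MatchString(pattern, path)
    	fmt.Println(matched, err) // true <nil>
    }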
@@ -138,7 +208,7 @@ func (v *Validator) CheckRoot() (errs error) {
if filepath.Base(v.spec.Root.Path) != "rootfs" {
errs = multierror.Append(errs,
- specerror.NewError(specerror.PathName, fmt.Errorf("path name should be the conventional 'rootfs'"), rspec.Version))
+ specerror.NewError(specerror.RootPathOnPosixConvention, fmt.Errorf("path name should be the conventional 'rootfs'"), rspec.Version))
}
var rootfsPath string
@@ -158,10 +228,10 @@ func (v *Validator) CheckRoot() (errs error) {
if fi, err := os.Stat(rootfsPath); err != nil {
errs = multierror.Append(errs,
- specerror.NewError(specerror.PathExistence, fmt.Errorf("cannot find the root path %q", rootfsPath), rspec.Version))
+ specerror.NewError(specerror.RootPathExist, fmt.Errorf("cannot find the root path %q", rootfsPath), rspec.Version))
} else if !fi.IsDir() {
errs = multierror.Append(errs,
- specerror.NewError(specerror.PathExistence, fmt.Errorf("root.path %q is not a directory", rootfsPath), rspec.Version))
+ specerror.NewError(specerror.RootPathExist, fmt.Errorf("root.path %q is not a directory", rootfsPath), rspec.Version))
}
rootParent := filepath.Dir(absRootPath)
@@ -170,13 +240,6 @@ func (v *Validator) CheckRoot() (errs error) {
specerror.NewError(specerror.ArtifactsInSingleDir, fmt.Errorf("root.path is %q, but it MUST be a child of %q", v.spec.Root.Path, absBundlePath), rspec.Version))
}
- if v.platform == "windows" {
- if v.spec.Root.Readonly {
- errs = multierror.Append(errs,
- specerror.NewError(specerror.ReadonlyOnWindows, fmt.Errorf("root.readonly field MUST be omitted or false when target platform is windows"), rspec.Version))
- }
- }
-
return
}
@@ -188,7 +251,7 @@ func (v *Validator) CheckSemVer() (errs error) {
_, err := semver.Parse(version)
if err != nil {
errs = multierror.Append(errs,
- specerror.NewError(specerror.SpecVersion, fmt.Errorf("%q is not valid SemVer: %s", version, err.Error()), rspec.Version))
+ specerror.NewError(specerror.SpecVersionInSemVer, fmt.Errorf("%q is not valid SemVer: %s", version, err.Error()), rspec.Version))
}
if version != rspec.Version {
errs = multierror.Append(errs, fmt.Errorf("validate currently only handles version %s, but the supplied configuration targets %s", rspec.Version, version))
@@ -202,18 +265,23 @@ func (v *Validator) CheckHooks() (errs error) {
logrus.Debugf("check hooks")
if v.spec.Hooks != nil {
- errs = multierror.Append(errs, checkEventHooks("pre-start", v.spec.Hooks.Prestart, v.HostSpecific))
- errs = multierror.Append(errs, checkEventHooks("post-start", v.spec.Hooks.Poststart, v.HostSpecific))
- errs = multierror.Append(errs, checkEventHooks("post-stop", v.spec.Hooks.Poststop, v.HostSpecific))
+ errs = multierror.Append(errs, v.checkEventHooks("prestart", v.spec.Hooks.Prestart, v.HostSpecific))
+ errs = multierror.Append(errs, v.checkEventHooks("poststart", v.spec.Hooks.Poststart, v.HostSpecific))
+ errs = multierror.Append(errs, v.checkEventHooks("poststop", v.spec.Hooks.Poststop, v.HostSpecific))
}
return
}
-func checkEventHooks(hookType string, hooks []rspec.Hook, hostSpecific bool) (errs error) {
- for _, hook := range hooks {
- if !filepath.IsAbs(hook.Path) {
- errs = multierror.Append(errs, fmt.Errorf("the %s hook %v: is not absolute path", hookType, hook.Path))
+func (v *Validator) checkEventHooks(hookType string, hooks []rspec.Hook, hostSpecific bool) (errs error) {
+ for i, hook := range hooks {
+ if !osFilepath.IsAbs(v.platform, hook.Path) {
+ errs = multierror.Append(errs,
+ specerror.NewError(
+ specerror.PosixHooksPathAbs,
+ fmt.Errorf("hooks.%s[%d].path %v: is not absolute path",
+ hookType, i, hook.Path),
+ rspec.Version))
}
if hostSpecific {
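
This hunk and the CheckProcess hunk below swap the host-bound filepath.IsAbs for runtime-tools' platform-aware variant, so a validator running on Linux can still judge Windows paths. A sketch of the distinction (the Windows result is my reading of a platform-aware IsAbs, not verified against the package):

    package main

    import (
    	"fmt"

    	osFilepath "github.com/opencontainers/runtime-tools/filepath"
    )

    func main() {
    	// filepath.IsAbs answers for the platform the validator runs on;
    	// osFilepath.IsAbs answers for the platform being validated.
    	fmt.Println(osFilepath.IsAbs("linux", "/hooks/start"))         // true
    	fmt.Println(osFilepath.IsAbs("linux", "hooks/start"))          // false
    	fmt.Println(osFilepath.IsAbs("windows", `c:\hooks\start.exe`)) // expected true: drive-letter path
    }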
@@ -245,8 +313,12 @@ func (v *Validator) CheckProcess() (errs error) {
}
process := v.spec.Process
- if !filepath.IsAbs(process.Cwd) {
- errs = multierror.Append(errs, fmt.Errorf("cwd %q is not an absolute path", process.Cwd))
+ if !osFilepath.IsAbs(v.platform, process.Cwd) {
+ errs = multierror.Append(errs,
+ specerror.NewError(
+ specerror.ProcCwdAbs,
+ fmt.Errorf("cwd %q is not an absolute path", process.Cwd),
+ rspec.Version))
}
for _, env := range process.Env {
@@ -256,7 +328,11 @@ func (v *Validator) CheckProcess() (errs error) {
}
if len(process.Args) == 0 {
- errs = multierror.Append(errs, fmt.Errorf("args must not be empty"))
+ errs = multierror.Append(errs,
+ specerror.NewError(
+ specerror.ProcArgsOneEntryRequired,
+ fmt.Errorf("args must not be empty"),
+ rspec.Version))
} else {
if filepath.IsAbs(process.Args[0]) {
var rootfsPath string
@@ -348,7 +424,7 @@ func (v *Validator) CheckCapabilities() (errs error) {
if effective && !permitted {
errs = multierror.Append(errs, fmt.Errorf("effective capability %q is not allowed, as it's not permitted", capability))
}
- if ambient && !(effective && inheritable) {
+ if ambient && !(permitted && inheritable) {
errs = multierror.Append(errs, fmt.Errorf("ambient capability %q is not allowed, as it's not permitted and inheribate", capability))
}
}
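
The one-word fix above aligns the check with capabilities(7): an ambient capability must also be in both the permitted and inheritable sets, and the effective set plays no part in that rule. Stated directly:

    // ambientAllowed mirrors the corrected condition: ambient requires
    // permitted AND inheritable, not (as before) effective AND inheritable.
    func ambientAllowed(permitted, inheritable bool) bool {
    	return permitted && inheritable
    }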
@@ -361,11 +437,20 @@ func (v *Validator) CheckCapabilities() (errs error) {
// CheckRlimits checks v.spec.Process.Rlimits
func (v *Validator) CheckRlimits() (errs error) {
+ if v.platform == "windows" {
+ return
+ }
+
process := v.spec.Process
for index, rlimit := range process.Rlimits {
for i := index + 1; i < len(process.Rlimits); i++ {
if process.Rlimits[index].Type == process.Rlimits[i].Type {
- errs = multierror.Append(errs, fmt.Errorf("rlimit can not contain the same type %q", process.Rlimits[index].Type))
+ errs = multierror.Append(errs,
+ specerror.NewError(
+ specerror.PosixProcRlimitsErrorOnDup,
+ fmt.Errorf("rlimit can not contain the same type %q",
+ process.Rlimits[index].Type),
+ rspec.Version))
}
}
errs = multierror.Append(errs, v.rlimitValid(rlimit))
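
The pairwise scan above is quadratic but fine for the handful of rlimits a config carries; the same duplicate check in isolation (sample values invented, rspec being the specs-go import used throughout this file):

    rlimits := []rspec.POSIXRlimit{
    	{Type: "RLIMIT_NOFILE", Hard: 2048, Soft: 1024},
    	{Type: "RLIMIT_NOFILE", Hard: 4096, Soft: 1024}, // duplicate type
    }
    for i := range rlimits {
    	for j := i + 1; j < len(rlimits); j++ {
    		if rlimits[i].Type == rlimits[j].Type {
    			fmt.Printf("duplicate rlimit type %q\n", rlimits[i].Type)
    		}
    	}
    }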
@@ -429,31 +514,33 @@ func (v *Validator) CheckMounts() (errs error) {
if supportedTypes != nil && !supportedTypes[mountA.Type] {
errs = multierror.Append(errs, fmt.Errorf("unsupported mount type %q", mountA.Type))
}
- if v.platform == "windows" {
- if err := pathValid(v.platform, mountA.Destination); err != nil {
- errs = multierror.Append(errs, err)
- }
- if err := pathValid(v.platform, mountA.Source); err != nil {
- errs = multierror.Append(errs, err)
- }
- } else {
- if err := pathValid(v.platform, mountA.Destination); err != nil {
- errs = multierror.Append(errs, err)
- }
+ if !osFilepath.IsAbs(v.platform, mountA.Destination) {
+ errs = multierror.Append(errs,
+ specerror.NewError(
+ specerror.MountsDestAbs,
+ fmt.Errorf("mounts[%d].destination %q is not absolute",
+ i,
+ mountA.Destination),
+ rspec.Version))
}
for j, mountB := range v.spec.Mounts {
if i == j {
continue
}
 // whether B.Destination is nested within A.Destination
- nested, err := nestedValid(v.platform, mountA.Destination, mountB.Destination)
+ nested, err := osFilepath.IsAncestor(v.platform, mountA.Destination, mountB.Destination, ".")
if err != nil {
errs = multierror.Append(errs, err)
continue
}
if nested {
if v.platform == "windows" && i < j {
- errs = multierror.Append(errs, fmt.Errorf("on Windows, %v nested within %v is forbidden", mountB.Destination, mountA.Destination))
+ errs = multierror.Append(errs,
+ specerror.NewError(
+ specerror.MountsDestOnWindowsNotNested,
+ fmt.Errorf("on Windows, %v nested within %v is forbidden",
+ mountB.Destination, mountA.Destination),
+ rspec.Version))
}
if i > j {
logrus.Warnf("%v will be covered by %v", mountB.Destination, mountA.Destination)
@@ -476,7 +563,11 @@ func (v *Validator) CheckPlatform() (errs error) {
if v.platform == "windows" {
if v.spec.Windows == nil {
- errs = multierror.Append(errs, errors.New("'windows' MUST be set when platform is `windows`"))
+ errs = multierror.Append(errs,
+ specerror.NewError(
+ specerror.PlatformSpecConfOnWindowsSet,
+ fmt.Errorf("'windows' MUST be set when platform is `windows`"),
+ rspec.Version))
}
}
@@ -506,14 +597,14 @@ func (v *Validator) CheckLinux() (errs error) {
for index := 0; index < len(v.spec.Linux.Namespaces); index++ {
ns := v.spec.Linux.Namespaces[index]
- if !namespaceValid(ns) {
+ if !v.namespaceValid(ns) {
errs = multierror.Append(errs, fmt.Errorf("namespace %v is invalid", ns))
}
tmpItem := nsTypeList[ns.Type]
tmpItem.num = tmpItem.num + 1
if tmpItem.num > 1 {
- errs = multierror.Append(errs, fmt.Errorf("duplicated namespace %q", ns.Type))
+ errs = multierror.Append(errs, specerror.NewError(specerror.NSErrorOnDup, fmt.Errorf("duplicated namespace %q", ns.Type), rspec.Version))
}
if len(ns.Path) == 0 {
@@ -572,7 +663,8 @@ func (v *Validator) CheckLinux() (errs error) {
} else {
fStat, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
- errs = multierror.Append(errs, fmt.Errorf("cannot determine state for device %s", device.Path))
+ errs = multierror.Append(errs, specerror.NewError(specerror.DevicesAvailable,
+ fmt.Errorf("cannot determine state for device %s", device.Path), rspec.Version))
continue
}
var devType string
@@ -587,7 +679,8 @@ func (v *Validator) CheckLinux() (errs error) {
devType = "unmatched"
}
if devType != device.Type || (devType == "c" && device.Type == "u") {
- errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path))
+ errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch,
+ fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version))
continue
}
if devType != "p" {
@@ -595,7 +688,8 @@ func (v *Validator) CheckLinux() (errs error) {
major := (dev >> 8) & 0xfff
minor := (dev & 0xff) | ((dev >> 12) & 0xfff00)
if int64(major) != device.Major || int64(minor) != device.Minor {
- errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path))
+ errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch,
+ fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version))
continue
}
}
@@ -603,19 +697,22 @@ func (v *Validator) CheckLinux() (errs error) {
expectedPerm := *device.FileMode & os.ModePerm
actualPerm := fi.Mode() & os.ModePerm
if expectedPerm != actualPerm {
- errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path))
+ errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch,
+ fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version))
continue
}
}
if device.UID != nil {
if *device.UID != fStat.Uid {
- errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path))
+ errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch,
+ fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version))
continue
}
}
if device.GID != nil {
if *device.GID != fStat.Gid {
- errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path))
+ errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch,
+ fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version))
continue
}
}
@@ -645,29 +742,23 @@ func (v *Validator) CheckLinux() (errs error) {
errs = multierror.Append(errs, v.CheckSeccomp())
}
- switch v.spec.Linux.RootfsPropagation {
- case "":
- case "private":
- case "rprivate":
- case "slave":
- case "rslave":
- case "shared":
- case "rshared":
- case "unbindable":
- case "runbindable":
- default:
- errs = multierror.Append(errs, errors.New("rootfsPropagation must be empty or one of \"private|rprivate|slave|rslave|shared|rshared|unbindable|runbindable\""))
- }
-
for _, maskedPath := range v.spec.Linux.MaskedPaths {
if !strings.HasPrefix(maskedPath, "/") {
- errs = multierror.Append(errs, fmt.Errorf("maskedPath %v is not an absolute path", maskedPath))
+ errs = multierror.Append(errs,
+ specerror.NewError(
+ specerror.MaskedPathsAbs,
+ fmt.Errorf("maskedPath %v is not an absolute path", maskedPath),
+ rspec.Version))
}
}
for _, readonlyPath := range v.spec.Linux.ReadonlyPaths {
if !strings.HasPrefix(readonlyPath, "/") {
- errs = multierror.Append(errs, fmt.Errorf("readonlyPath %v is not an absolute path", readonlyPath))
+ errs = multierror.Append(errs,
+ specerror.NewError(
+ specerror.ReadonlyPathsAbs,
+ fmt.Errorf("readonlyPath %v is not an absolute path", readonlyPath),
+ rspec.Version))
}
}
@@ -709,7 +800,7 @@ func (v *Validator) CheckLinuxResources() (errs error) {
}
for index := 0; index < len(r.Devices); index++ {
switch r.Devices[index].Type {
- case "a", "b", "c":
+ case "a", "b", "c", "":
default:
errs = multierror.Append(errs, fmt.Errorf("type of devices %s is invalid", r.Devices[index].Type))
}
@@ -825,12 +916,19 @@ func (v *Validator) rlimitValid(rlimit rspec.POSIXRlimit) (errs error) {
}
if v.platform == "linux" {
- for _, val := range defaultRlimits {
+ for _, val := range linuxRlimits {
+ if val == rlimit.Type {
+ return
+ }
+ }
+ errs = multierror.Append(errs, specerror.NewError(specerror.PosixProcRlimitsTypeValueError, fmt.Errorf("rlimit type %q may not be valid", rlimit.Type), v.spec.Version))
+ } else if v.platform == "solaris" {
+ for _, val := range posixRlimits {
if val == rlimit.Type {
return
}
}
- errs = multierror.Append(errs, fmt.Errorf("rlimit type %q is invalid", rlimit.Type))
+ errs = multierror.Append(errs, specerror.NewError(specerror.PosixProcRlimitsTypeValueError, fmt.Errorf("rlimit type %q may not be valid", rlimit.Type), v.spec.Version))
} else {
logrus.Warnf("process.rlimits validation not yet implemented for platform %q", v.platform)
}
@@ -838,7 +936,7 @@ func (v *Validator) rlimitValid(rlimit rspec.POSIXRlimit) (errs error) {
return
}
-func namespaceValid(ns rspec.LinuxNamespace) bool {
+func (v *Validator) namespaceValid(ns rspec.LinuxNamespace) bool {
switch ns.Type {
case rspec.PIDNamespace:
case rspec.NetworkNamespace:
@@ -851,72 +949,13 @@ func namespaceValid(ns rspec.LinuxNamespace) bool {
return false
}
- if ns.Path != "" && !filepath.IsAbs(ns.Path) {
+ if ns.Path != "" && !osFilepath.IsAbs(v.platform, ns.Path) {
return false
}
return true
}
-func pathValid(os, path string) error {
- if os == "windows" {
- matched, err := regexp.MatchString("^[a-zA-Z]:(\\\\[^\\\\/<>|:*?\"]+)+$", path)
- if err != nil {
- return err
- }
- if !matched {
- return fmt.Errorf("invalid windows path %v", path)
- }
- return nil
- }
- if !filepath.IsAbs(path) {
- return fmt.Errorf("%v is not an absolute path", path)
- }
- return nil
-}
-
-// Check whether pathB is nested whithin pathA
-func nestedValid(os, pathA, pathB string) (bool, error) {
- if pathA == pathB {
- return false, nil
- }
- if pathA == "/" && pathB != "" {
- return true, nil
- }
-
- var sep string
- if os == "windows" {
- sep = "\\"
- } else {
- sep = "/"
- }
-
- splitedPathA := strings.Split(filepath.Clean(pathA), sep)
- splitedPathB := strings.Split(filepath.Clean(pathB), sep)
- lenA := len(splitedPathA)
- lenB := len(splitedPathB)
-
- if lenA > lenB {
- if (lenA - lenB) == 1 {
- // if pathA is longer but not end with separator
- if splitedPathA[lenA-1] != "" {
- return false, nil
- }
- splitedPathA = splitedPathA[:lenA-1]
- } else {
- return false, nil
- }
- }
-
- for i, partA := range splitedPathA {
- if partA != splitedPathB[i] {
- return false, nil
- }
- }
-
- return true, nil
-}
-
func deviceValid(d rspec.LinuxDevice) bool {
switch d.Type {
case "b", "c", "u":
@@ -924,7 +963,7 @@ func deviceValid(d rspec.LinuxDevice) bool {
return false
}
case "p":
- if d.Major > 0 || d.Minor > 0 {
+ if d.Major != 0 || d.Minor != 0 {
return false
}
default:
@@ -935,7 +974,6 @@ func deviceValid(d rspec.LinuxDevice) bool {
func seccompActionValid(secc rspec.LinuxSeccompAction) bool {
switch secc {
- case "":
case rspec.ActKill:
case rspec.ActTrap:
case rspec.ActErrno:
diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go
deleted file mode 100644
index 169de3922..000000000
--- a/vendor/github.com/stretchr/testify/require/doc.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Package require implements the same assertions as the `assert` package but
-// stops test execution when a test fails.
-//
-// Example Usage
-//
-// The following is a complete example using require in a standard test function:
-// import (
-// "testing"
-// "github.com/stretchr/testify/require"
-// )
-//
-// func TestSomething(t *testing.T) {
-//
-// var a string = "Hello"
-// var b string = "Hello"
-//
-// require.Equal(t, a, b, "The two words should be the same.")
-//
-// }
-//
-// Assertions
-//
-// The `require` package have same global functions as in the `assert` package,
-// but instead of returning a boolean result they call `t.FailNow()`.
-//
-// Every assertion function also takes an optional string message as the final argument,
-// allowing custom error messages to be appended to the message the assertion method outputs.
-package require
diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go
deleted file mode 100644
index d3c2ab9bc..000000000
--- a/vendor/github.com/stretchr/testify/require/forward_requirements.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package require
-
-// Assertions provides assertion methods around the
-// TestingT interface.
-type Assertions struct {
- t TestingT
-}
-
-// New makes a new Assertions object for the specified TestingT.
-func New(t TestingT) *Assertions {
- return &Assertions{
- t: t,
- }
-}
-
-//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go
deleted file mode 100644
index fc567f140..000000000
--- a/vendor/github.com/stretchr/testify/require/require.go
+++ /dev/null
@@ -1,429 +0,0 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
-
-package require
-
-import (
- assert "github.com/stretchr/testify/assert"
- http "net/http"
- url "net/url"
- time "time"
-)
-
-// Condition uses a Comparison to assert a complex condition.
-func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
- if !assert.Condition(t, comp, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// Contains asserts that the specified string, list(array, slice...) or map contains the
-// specified substring or element.
-//
-// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
-// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
-// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
- if !assert.Contains(t, s, contains, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// assert.Empty(t, obj)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if !assert.Empty(t, object, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// Equal asserts that two objects are equal.
-//
-// assert.Equal(t, 123, 123, "123 and 123 should be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if !assert.Equal(t, expected, actual, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// EqualError asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// assert.EqualError(t, err, expectedErrorString, "An error was expected")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
- if !assert.EqualError(t, theError, errString, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
-//
-// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if !assert.EqualValues(t, expected, actual, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// Error asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.Error(t, err, "An error was expected") {
-// assert.Equal(t, err, expectedError)
-// }
-//
-// Returns whether the assertion was successful (true) or not (false).
-func Error(t TestingT, err error, msgAndArgs ...interface{}) {
- if !assert.Error(t, err, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// Exactly asserts that two objects are equal is value and type.
-//
-// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if !assert.Exactly(t, expected, actual, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// Fail reports a failure through
-func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
- if !assert.Fail(t, failureMessage, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// FailNow fails test
-func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
- if !assert.FailNow(t, failureMessage, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// False asserts that the specified value is false.
-//
-// assert.False(t, myBool, "myBool should be false")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func False(t TestingT, value bool, msgAndArgs ...interface{}) {
- if !assert.False(t, value, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// HTTPBodyContains asserts that a specified handler returns a
-// body that contains a string.
-//
-// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
- if !assert.HTTPBodyContains(t, handler, method, url, values, str) {
- t.FailNow()
- }
-}
-
-// HTTPBodyNotContains asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
- if !assert.HTTPBodyNotContains(t, handler, method, url, values, str) {
- t.FailNow()
- }
-}
-
-// HTTPError asserts that a specified handler returns an error status code.
-//
-// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
- if !assert.HTTPError(t, handler, method, url, values) {
- t.FailNow()
- }
-}
-
-// HTTPRedirect asserts that a specified handler returns a redirect status code.
-//
-// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
- if !assert.HTTPRedirect(t, handler, method, url, values) {
- t.FailNow()
- }
-}
-
-// HTTPSuccess asserts that a specified handler returns a success status code.
-//
-// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
- if !assert.HTTPSuccess(t, handler, method, url, values) {
- t.FailNow()
- }
-}
-
-// Implements asserts that an object is implemented by the specified interface.
-//
-// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
-func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
- if !assert.Implements(t, interfaceObject, object, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// InDelta asserts that the two numerals are within delta of each other.
-//
-// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// InDeltaSlice is the same as InDelta, except it compares two slices.
-func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// InEpsilon asserts that expected and actual have a relative error less than epsilon
-//
-// Returns whether the assertion was successful (true) or not (false).
-func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
- if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
-func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
- if !assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// IsType asserts that the specified objects are of the same type.
-func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
- if !assert.IsType(t, expectedType, object, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// JSONEq asserts that two JSON strings are equivalent.
-//
-// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
- if !assert.JSONEq(t, expected, actual, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// Len asserts that the specified object has specific length.
-// Len also fails if the object has a type that len() not accept.
-//
-// assert.Len(t, mySlice, 3, "The size of slice is not 3")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) {
- if !assert.Len(t, object, length, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// Nil asserts that the specified object is nil.
-//
-// assert.Nil(t, err, "err should be nothing")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if !assert.Nil(t, object, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// NoError asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if assert.NoError(t, err) {
-// assert.Equal(t, actualObj, expectedObj)
-// }
-//
-// Returns whether the assertion was successful (true) or not (false).
-func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
- if !assert.NoError(t, err, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
-// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
-// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
- if !assert.NotContains(t, s, contains, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// if assert.NotEmpty(t, obj) {
-// assert.Equal(t, "two", obj[1])
-// }
-//
-// Returns whether the assertion was successful (true) or not (false).
-func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if !assert.NotEmpty(t, object, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// NotEqual asserts that the specified values are NOT equal.
-//
-// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- if !assert.NotEqual(t, expected, actual, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// NotNil asserts that the specified object is not nil.
-//
-// assert.NotNil(t, err, "err should be something")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
- if !assert.NotNil(t, object, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// assert.NotPanics(t, func(){
-// RemainCalm()
-// }, "Calling RemainCalm() should NOT panic")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if !assert.NotPanics(t, f, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// NotRegexp asserts that a specified regexp does not match a string.
-//
-// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
-// assert.NotRegexp(t, "^start", "it's not starting")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
- if !assert.NotRegexp(t, rx, str, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// NotZero asserts that i is not the zero value for its type and returns the truth.
-func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
- if !assert.NotZero(t, i, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// Panics asserts that the code inside the specified PanicTestFunc panics.
-//
-// assert.Panics(t, func(){
-// GoCrazy()
-// }, "Calling GoCrazy() should panic")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- if !assert.Panics(t, f, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// Regexp asserts that a specified regexp matches a string.
-//
-// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
-// assert.Regexp(t, "start...$", "it's not starting")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
- if !assert.Regexp(t, rx, str, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// True asserts that the specified value is true.
-//
-// assert.True(t, myBool, "myBool should be true")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func True(t TestingT, value bool, msgAndArgs ...interface{}) {
- if !assert.True(t, value, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// WithinDuration asserts that the two times are within duration delta of each other.
-//
-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
- if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) {
- t.FailNow()
- }
-}
-
-// Zero asserts that i is the zero value for its type and returns the truth.
-func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
- if !assert.Zero(t, i, msgAndArgs...) {
- t.FailNow()
- }
-}
diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go
deleted file mode 100644
index caa18793d..000000000
--- a/vendor/github.com/stretchr/testify/require/require_forward.go
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
-
-package require
-
-import (
- assert "github.com/stretchr/testify/assert"
- http "net/http"
- url "net/url"
- time "time"
-)
-
-// Condition uses a Comparison to assert a complex condition.
-func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
- Condition(a.t, comp, msgAndArgs...)
-}
-
-// Contains asserts that the specified string, list(array, slice...) or map contains the
-// specified substring or element.
-//
-// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
-// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
-// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
- Contains(a.t, s, contains, msgAndArgs...)
-}
-
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// a.Empty(obj)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
- Empty(a.t, object, msgAndArgs...)
-}
-
-// Equal asserts that two objects are equal.
-//
-// a.Equal(123, 123, "123 and 123 should be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- Equal(a.t, expected, actual, msgAndArgs...)
-}
-
-// EqualError asserts that a function returned an error (i.e. not `nil`)
-// and that it is equal to the provided error.
-//
-// actualObj, err := SomeFunction()
-// a.EqualError(err, expectedErrorString, "An error was expected")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
- EqualError(a.t, theError, errString, msgAndArgs...)
-}
-
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
-//
-// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- EqualValues(a.t, expected, actual, msgAndArgs...)
-}
-
-// Error asserts that a function returned an error (i.e. not `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.Error(err, "An error was expected") {
-// assert.Equal(t, err, expectedError)
-// }
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
- Error(a.t, err, msgAndArgs...)
-}
-
-// Exactly asserts that two objects are equal is value and type.
-//
-// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- Exactly(a.t, expected, actual, msgAndArgs...)
-}
-
-// Fail reports a failure through
-func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) {
- Fail(a.t, failureMessage, msgAndArgs...)
-}
-
-// FailNow fails test
-func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) {
- FailNow(a.t, failureMessage, msgAndArgs...)
-}
-
-// False asserts that the specified value is false.
-//
-// a.False(myBool, "myBool should be false")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) False(value bool, msgAndArgs ...interface{}) {
- False(a.t, value, msgAndArgs...)
-}
-
-// HTTPBodyContains asserts that a specified handler returns a
-// body that contains a string.
-//
-// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
- HTTPBodyContains(a.t, handler, method, url, values, str)
-}
-
-// HTTPBodyNotContains asserts that a specified handler returns a
-// body that does not contain a string.
-//
-// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
- HTTPBodyNotContains(a.t, handler, method, url, values, str)
-}
-
-// HTTPError asserts that a specified handler returns an error status code.
-//
-// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) {
- HTTPError(a.t, handler, method, url, values)
-}
-
-// HTTPRedirect asserts that a specified handler returns a redirect status code.
-//
-// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) {
- HTTPRedirect(a.t, handler, method, url, values)
-}
-
-// HTTPSuccess asserts that a specified handler returns a success status code.
-//
-// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) {
- HTTPSuccess(a.t, handler, method, url, values)
-}
-
-// Implements asserts that an object is implemented by the specified interface.
-//
-// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
-func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
- Implements(a.t, interfaceObject, object, msgAndArgs...)
-}
-
-// InDelta asserts that the two numerals are within delta of each other.
-//
-// a.InDelta(math.Pi, (22 / 7.0), 0.01)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- InDelta(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// InDeltaSlice is the same as InDelta, except it compares two slices.
-func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
- InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// InEpsilon asserts that expected and actual have a relative error less than epsilon
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
- InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
-}
-
-// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
-func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
- InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
-}
-
-// IsType asserts that the specified objects are of the same type.
-func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
- IsType(a.t, expectedType, object, msgAndArgs...)
-}
-
-// JSONEq asserts that two JSON strings are equivalent.
-//
-// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) {
- JSONEq(a.t, expected, actual, msgAndArgs...)
-}
-
-// Len asserts that the specified object has specific length.
-// Len also fails if the object has a type that len() not accept.
-//
-// a.Len(mySlice, 3, "The size of slice is not 3")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) {
- Len(a.t, object, length, msgAndArgs...)
-}
-
-// Nil asserts that the specified object is nil.
-//
-// a.Nil(err, "err should be nothing")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) {
- Nil(a.t, object, msgAndArgs...)
-}
-
-// NoError asserts that a function returned no error (i.e. `nil`).
-//
-// actualObj, err := SomeFunction()
-// if a.NoError(err) {
-// assert.Equal(t, actualObj, expectedObj)
-// }
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) {
- NoError(a.t, err, msgAndArgs...)
-}
-
-// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
-// specified substring or element.
-//
-// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
-// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
-// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
- NotContains(a.t, s, contains, msgAndArgs...)
-}
-
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
-//
-// if a.NotEmpty(obj) {
-// assert.Equal(t, "two", obj[1])
-// }
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
- NotEmpty(a.t, object, msgAndArgs...)
-}
-
-// NotEqual asserts that the specified values are NOT equal.
-//
-// a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
-//
-// Returns whether the assertion was successful (true) or not (false).
-//
-// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
-func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
- NotEqual(a.t, expected, actual, msgAndArgs...)
-}
-
-// NotNil asserts that the specified object is not nil.
-//
-// a.NotNil(err, "err should be something")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) {
- NotNil(a.t, object, msgAndArgs...)
-}
-
-// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
-//
-// a.NotPanics(func(){
-// RemainCalm()
-// }, "Calling RemainCalm() should NOT panic")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- NotPanics(a.t, f, msgAndArgs...)
-}
-
-// NotRegexp asserts that a specified regexp does not match a string.
-//
-// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
-// a.NotRegexp("^start", "it's not starting")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
- NotRegexp(a.t, rx, str, msgAndArgs...)
-}
-
-// NotZero asserts that i is not the zero value for its type and returns the truth.
-func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) {
- NotZero(a.t, i, msgAndArgs...)
-}
-
-// Panics asserts that the code inside the specified PanicTestFunc panics.
-//
-// a.Panics(func(){
-// GoCrazy()
-// }, "Calling GoCrazy() should panic")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
- Panics(a.t, f, msgAndArgs...)
-}
-
-// Regexp asserts that a specified regexp matches a string.
-//
-// a.Regexp(regexp.MustCompile("start"), "it's starting")
-// a.Regexp("start...$", "it's not starting")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
- Regexp(a.t, rx, str, msgAndArgs...)
-}
-
-// True asserts that the specified value is true.
-//
-// a.True(myBool, "myBool should be true")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) True(value bool, msgAndArgs ...interface{}) {
- True(a.t, value, msgAndArgs...)
-}
-
-// WithinDuration asserts that the two times are within duration delta of each other.
-//
-// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
-//
-// Returns whether the assertion was successful (true) or not (false).
-func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
- WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
-}
-
-// Zero asserts that i is the zero value for its type and returns the truth.
-func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
- Zero(a.t, i, msgAndArgs...)
-}
diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go
deleted file mode 100644
index 41147562d..000000000
--- a/vendor/github.com/stretchr/testify/require/requirements.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package require
-
-// TestingT is an interface wrapper around *testing.T
-type TestingT interface {
- Errorf(format string, args ...interface{})
- FailNow()
-}
-
-//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl
diff --git a/vendor/k8s.io/utils/LICENSE b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt
index d64569567..55ede8a42 100644
--- a/vendor/k8s.io/utils/LICENSE
+++ b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt
@@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright [yyyy] [name of copyright owner]
+ Copyright 2015 xeipuuv
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md
new file mode 100644
index 000000000..dbe4d5082
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md
@@ -0,0 +1,8 @@
+# gojsonpointer
+An implementation of JSON Pointer - Go language
+
+## References
+http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+
+### Note
+Section "4. Evaluation" of the reference above, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...', is not implemented.
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go
new file mode 100644
index 000000000..06f1918e8
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go
@@ -0,0 +1,190 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonpointer
+// repository-desc An implementation of JSON Pointer - Go language
+//
+// description Main and unique file.
+//
+// created 25-02-2013
+
+package gojsonpointer
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+const (
+ const_empty_pointer = ``
+ const_pointer_separator = `/`
+
+ const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"`
+)
+
+type implStruct struct {
+ mode string // "SET" or "GET"
+
+ inDocument interface{}
+
+ setInValue interface{}
+
+ getOutNode interface{}
+ getOutKind reflect.Kind
+ outError error
+}
+
+type JsonPointer struct {
+ referenceTokens []string
+}
+
+// NewJsonPointer parses the given string JSON pointer and returns an object
+func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) {
+
+ // Pointer to the root of the document
+ if len(jsonPointerString) == 0 {
+ // Keep referenceTokens nil
+ return
+ }
+ if jsonPointerString[0] != '/' {
+ return p, errors.New(const_invalid_start)
+ }
+
+ p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator)
+ return
+}
+
+// Get uses the pointer to retrieve a value from a JSON document
+func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+
+ is := &implStruct{mode: "GET", inDocument: document}
+ p.implementation(is)
+ return is.getOutNode, is.getOutKind, is.outError
+
+}
+
+// Set uses the pointer to update a value in a JSON document
+func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {
+
+ is := &implStruct{mode: "SET", inDocument: document, setInValue: value}
+ p.implementation(is)
+ return document, is.outError
+
+}
+
+// Both Get and Set functions use the same implementation to avoid code duplication
+func (p *JsonPointer) implementation(i *implStruct) {
+
+ kind := reflect.Invalid
+
+ // Full document when empty
+ if len(p.referenceTokens) == 0 {
+ i.getOutNode = i.inDocument
+ i.getOutKind = kind
+ i.outError = nil
+ return
+ }
+
+ node := i.inDocument
+
+ for ti, token := range p.referenceTokens {
+
+ isLastToken := ti == len(p.referenceTokens)-1
+
+ switch v := node.(type) {
+
+ case map[string]interface{}:
+ decodedToken := decodeReferenceToken(token)
+ if _, ok := v[decodedToken]; ok {
+ node = v[decodedToken]
+ if isLastToken && i.mode == "SET" {
+ v[decodedToken] = i.setInValue
+ }
+ } else {
+ i.outError = fmt.Errorf("Object has no key '%s'", decodedToken)
+ i.getOutKind = reflect.Map
+ i.getOutNode = nil
+ return
+ }
+
+ case []interface{}:
+ tokenIndex, err := strconv.Atoi(token)
+ if err != nil {
+ i.outError = fmt.Errorf("Invalid array index '%s'", token)
+ i.getOutKind = reflect.Slice
+ i.getOutNode = nil
+ return
+ }
+ if tokenIndex < 0 || tokenIndex >= len(v) {
+ i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex)
+ i.getOutKind = reflect.Slice
+ i.getOutNode = nil
+ return
+ }
+
+ node = v[tokenIndex]
+ if isLastToken && i.mode == "SET" {
+ v[tokenIndex] = i.setInValue
+ }
+
+ default:
+ i.outError = fmt.Errorf("Invalid token reference '%s'", token)
+ i.getOutKind = reflect.ValueOf(node).Kind()
+ i.getOutNode = nil
+ return
+ }
+
+ }
+
+ i.getOutNode = node
+ i.getOutKind = reflect.ValueOf(node).Kind()
+ i.outError = nil
+}
+
+// String returns the string representation of the pointer
+func (p *JsonPointer) String() string {
+
+ if len(p.referenceTokens) == 0 {
+ return const_empty_pointer
+ }
+
+ pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)
+
+ return pointerString
+}
+
+// Specific JSON pointer encoding here
+// ~0 => ~
+// ~1 => /
+// ... and vice versa
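+//
+// For example, the token "a~1b" decodes to the key "a/b",
+// and the key "m~n" encodes to the token "m~0n".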
+
+func decodeReferenceToken(token string) string {
+ step1 := strings.Replace(token, `~1`, `/`, -1)
+ step2 := strings.Replace(step1, `~0`, `~`, -1)
+ return step2
+}
+
+func encodeReferenceToken(token string) string {
+ step1 := strings.Replace(token, `~`, `~0`, -1)
+ step2 := strings.Replace(step1, `/`, `~1`, -1)
+ return step2
+}
diff --git a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt
new file mode 100644
index 000000000..55ede8a42
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2015 xeipuuv
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/xeipuuv/gojsonreference/README.md b/vendor/github.com/xeipuuv/gojsonreference/README.md
new file mode 100644
index 000000000..9ab6e1eb1
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonreference/README.md
@@ -0,0 +1,10 @@
+# gojsonreference
+An implementation of JSON Reference - Go language
+
+## Dependencies
+https://github.com/xeipuuv/gojsonpointer
+
+## References
+http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+
+http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
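+
+## Example
+
+A minimal sketch (not from the upstream docs) of the API in reference.go:
+
+```go
+ref, err := gojsonreference.NewJsonReference("http://example.com/schema.json#/definitions/user")
+if err != nil {
+ panic(err)
+}
+
+fmt.Println(ref.GetUrl().Host)         // example.com
+fmt.Println(ref.GetPointer().String()) // /definitions/user
+fmt.Println(ref.IsCanonical())         // true: full URL, no file scheme
+```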
diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference.go b/vendor/github.com/xeipuuv/gojsonreference/reference.go
new file mode 100644
index 000000000..d4d2eca0a
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonreference/reference.go
@@ -0,0 +1,141 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonreference
+// repository-desc An implementation of JSON Reference - Go language
+//
+// description Main and only file.
+//
+// created 26-02-2013
+
+package gojsonreference
+
+import (
+ "errors"
+ "github.com/xeipuuv/gojsonpointer"
+ "net/url"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+const (
+ const_fragment_char = `#`
+)
+
+func NewJsonReference(jsonReferenceString string) (JsonReference, error) {
+
+ var r JsonReference
+ err := r.parse(jsonReferenceString)
+ return r, err
+
+}
+
+type JsonReference struct {
+ referenceUrl *url.URL
+ referencePointer gojsonpointer.JsonPointer
+
+ HasFullUrl bool
+ HasUrlPathOnly bool
+ HasFragmentOnly bool
+ HasFileScheme bool
+ HasFullFilePath bool
+}
+
+func (r *JsonReference) GetUrl() *url.URL {
+ return r.referenceUrl
+}
+
+func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer {
+ return &r.referencePointer
+}
+
+func (r *JsonReference) String() string {
+
+ if r.referenceUrl != nil {
+ return r.referenceUrl.String()
+ }
+
+ if r.HasFragmentOnly {
+ return const_fragment_char + r.referencePointer.String()
+ }
+
+ return r.referencePointer.String()
+}
+
+func (r *JsonReference) IsCanonical() bool {
+ return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl)
+}
+
+// "Constructor", parses the given string JSON reference
+func (r *JsonReference) parse(jsonReferenceString string) (err error) {
+
+ r.referenceUrl, err = url.Parse(jsonReferenceString)
+ if err != nil {
+ return
+ }
+ refUrl := r.referenceUrl
+
+ if refUrl.Scheme != "" && refUrl.Host != "" {
+ r.HasFullUrl = true
+ } else {
+ if refUrl.Path != "" {
+ r.HasUrlPathOnly = true
+ } else if refUrl.RawQuery == "" && refUrl.Fragment != "" {
+ r.HasFragmentOnly = true
+ }
+ }
+
+ r.HasFileScheme = refUrl.Scheme == "file"
+ if runtime.GOOS == "windows" {
+ // on Windows, a file URL may have an extra leading slash, and if it
+ // doesn't then its first component will be treated as the host by the
+ // Go runtime
+ if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") {
+ r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:])
+ } else {
+ r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path)
+ }
+ } else {
+ r.HasFullFilePath = filepath.IsAbs(refUrl.Path)
+ }
+
+ // invalid json-pointer error means url has no json-pointer fragment. simply ignore error
+ r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment)
+
+ return
+}
+
+// Inherits creates a new reference from a parent and a child.
+// If the child cannot inherit from the parent, an error is returned.
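+// For example, resolving the child "other.json" against the parent
+// "http://host/dir/schema.json" yields "http://host/dir/other.json".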
+func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
+ childUrl := child.GetUrl()
+ parentUrl := r.GetUrl()
+ if childUrl == nil {
+ return nil, errors.New("childUrl is nil!")
+ }
+ if parentUrl == nil {
+ return nil, errors.New("parentUrl is nil!")
+ }
+
+ ref, err := NewJsonReference(parentUrl.ResolveReference(childUrl).String())
+ if err != nil {
+ return nil, err
+ }
+ return &ref, err
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt
new file mode 100644
index 000000000..55ede8a42
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2015 xeipuuv
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/xeipuuv/gojsonschema/README.md b/vendor/github.com/xeipuuv/gojsonschema/README.md
new file mode 100644
index 000000000..e02976bc6
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/README.md
@@ -0,0 +1,294 @@
+[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema)
+
+# gojsonschema
+
+## Description
+
+An implementation of JSON Schema, based on IETF's draft v4 - Go language
+
+References :
+
+* http://json-schema.org
+* http://json-schema.org/latest/json-schema-core.html
+* http://json-schema.org/latest/json-schema-validation.html
+
+## Installation
+
+```
+go get github.com/xeipuuv/gojsonschema
+```
+
+Dependencies :
+* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
+* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
+* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
+
+## Usage
+
+### Example
+
+```go
+
+package main
+
+import (
+ "fmt"
+ "github.com/xeipuuv/gojsonschema"
+)
+
+func main() {
+
+ schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+ documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
+
+ result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+ if err != nil {
+ panic(err.Error())
+ }
+
+ if result.Valid() {
+ fmt.Printf("The document is valid\n")
+ } else {
+ fmt.Printf("The document is not valid. see errors :\n")
+ for _, desc := range result.Errors() {
+ fmt.Printf("- %s\n", desc)
+ }
+ }
+
+}
+
+
+```
+
+#### Loaders
+
+There are various ways to load your JSON data.
+In order to load your schemas and documents,
+first declare an appropriate loader :
+
+* Web / HTTP, using a reference :
+
+```go
+loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
+```
+
+* Local file, using a reference :
+
+```go
+loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+```
+
+References use URI syntax; the scheme prefix (file://) and a full path to the file are required.
+
+* JSON strings :
+
+```go
+loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
+```
+
+* Custom Go types :
+
+```go
+m := map[string]interface{}{"type": "string"}
+loader := gojsonschema.NewGoLoader(m)
+```
+
+And with your own struct types :
+
+```go
+type Root struct {
+ Users []User `json:"users"`
+}
+
+type User struct {
+ Name string `json:"name"`
+}
+
+...
+
+data := Root{}
+data.Users = append(data.Users, User{"John"})
+data.Users = append(data.Users, User{"Sophia"})
+data.Users = append(data.Users, User{"Bill"})
+
+loader := gojsonschema.NewGoLoader(data)
+```
+
+#### Validation
+
+Once the loaders are set, validation is easy :
+
+```go
+result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+```
+
+Alternatively, you might want to load a schema only once and reuse it for multiple validations :
+
+```go
+schema, err := gojsonschema.NewSchema(schemaLoader)
+...
+result1, err := schema.Validate(documentLoader1)
+...
+result2, err := schema.Validate(documentLoader2)
+...
+// etc ...
+```
+
+To check the result :
+
+```go
+ if result.Valid() {
+ fmt.Printf("The document is valid\n")
+ } else {
+ fmt.Printf("The document is not valid. see errors :\n")
+ for _, err := range result.Errors() {
+ // Err implements the ResultError interface
+ fmt.Printf("- %s\n", err)
+ }
+ }
+```
+
+## Working with Errors
+
+The library handles string error codes, which you can customize by creating your own gojsonschema.locale and setting it:
+```go
+gojsonschema.Locale = YourCustomLocale{}
+```
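+
+As a sketch (assuming the library's exported `DefaultLocale` type can be embedded to inherit the default messages), a custom locale only needs to override the templates you care about:
+
+```go
+type MyLocale struct {
+ gojsonschema.DefaultLocale // fall back to the default messages
+}
+
+// Override a single message template.
+func (l MyLocale) Required() string {
+ return `{{.property}} is mandatory`
+}
+
+// gojsonschema.Locale = MyLocale{}
+```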
+
+However, each error contains additional contextual information.
+
+**err.Type()**: *string* Returns the "type" of error that occurred. Note that you can also type-check against the concrete error types listed below.
+
+Note: an error of type RequiredError has an err.Type() return value of "required":
+
+ "required": RequiredError
+ "invalid_type": InvalidTypeError
+ "number_any_of": NumberAnyOfError
+ "number_one_of": NumberOneOfError
+ "number_all_of": NumberAllOfError
+ "number_not": NumberNotError
+ "missing_dependency": MissingDependencyError
+ "internal": InternalError
+ "enum": EnumError
+ "array_no_additional_items": ArrayNoAdditionalItemsError
+ "array_min_items": ArrayMinItemsError
+ "array_max_items": ArrayMaxItemsError
+ "unique": ItemsMustBeUniqueError
+ "array_min_properties": ArrayMinPropertiesError
+ "array_max_properties": ArrayMaxPropertiesError
+ "additional_property_not_allowed": AdditionalPropertyNotAllowedError
+ "invalid_property_pattern": InvalidPropertyPatternError
+ "string_gte": StringLengthGTEError
+ "string_lte": StringLengthLTEError
+ "pattern": DoesNotMatchPatternError
+ "multiple_of": MultipleOfError
+ "number_gte": NumberGTEError
+ "number_gt": NumberGTError
+ "number_lte": NumberLTEError
+ "number_lt": NumberLTError
+
+**err.Value()**: *interface{}* Returns the value given
+
+**err.Context()**: *gojsonschema.jsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
+
+**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
+
+**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
+
+**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
+
+Note that in most cases, err.Details() will be used to generate replacement strings in your locales rather than being used directly. These strings follow the text/template format, i.e.
+```
+{{.field}} must be greater than or equal to {{.min}}
+```
+
+The library allows you to specify custom template functions, should you require more complex error message handling.
+```go
+gojsonschema.ErrorTemplateFuncs = map[string]interface{}{
+ "allcaps": func(s string) string {
+ return strings.ToUpper(s)
+ },
+}
+```
+
+Given the above definition, you can use the custom function `"allcaps"` in your localization templates:
+```
+{{allcaps .field}} must be greater than or equal to {{.min}}
+```
+
+The above error message would then be rendered with the `field` value in capital letters. For example:
+```
+"PASSWORD must be greater than or equal to 8"
+```
+
+Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type.
+
+## Formats
+JSON Schema allows for an optional "format" property to validate instances against well-known formats. gojsonschema ships with all of the formats defined in the spec, which you can use like this:
+````json
+{"type": "string", "format": "email"}
+````
+Available formats: date-time, hostname, email, ipv4, ipv6, uri, uri-reference, uuid, regex.
+
+For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
+
+```go
+// Define the format checker
+type RoleFormatChecker struct {}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f RoleFormatChecker) IsFormat(input interface{}) bool {
+
+ asString, ok := input.(string)
+ if ok == false {
+ return false
+ }
+
+ return strings.HasPrefix(asString, "ROLE_")
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
+```
+
+Now use it in your JSON schema:
+````json
+{"type": "string", "format": "role"}
+````
+
+Another example would be to check whether the provided integer matches an id in the database:
+
+JSON schema:
+```json
+{"type": "integer", "format": "ValidUserId"}
+```
+
+```go
+// Define the format checker
+type ValidUserIdFormatChecker struct {}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
+
+ asFloat64, ok := input.(float64) // Numbers are always float64 here
+ if ok == false {
+ return false
+ }
+
+ // XXX
+ // do the magic on the database looking for the int(asFloat64)
+
+ return true
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
+```
+
+## Uses
+
+gojsonschema uses the following test suite :
+
+https://github.com/json-schema/JSON-Schema-Test-Suite
diff --git a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go
new file mode 100644
index 000000000..d39f01959
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/errors.go
@@ -0,0 +1,283 @@
+package gojsonschema
+
+import (
+ "bytes"
+ "sync"
+ "text/template"
+)
+
+var errorTemplates errorTemplate = errorTemplate{template.New("errors-new"), sync.RWMutex{}}
+
+// template.Template is not thread-safe for writing, so some locking is done
+// sync.RWMutex is used for efficiently locking when new templates are created
+type errorTemplate struct {
+ *template.Template
+ sync.RWMutex
+}
+
+type (
+ // RequiredError. ErrorDetails: property string
+ RequiredError struct {
+ ResultErrorFields
+ }
+
+ // InvalidTypeError. ErrorDetails: expected, given
+ InvalidTypeError struct {
+ ResultErrorFields
+ }
+
+ // NumberAnyOfError. ErrorDetails: -
+ NumberAnyOfError struct {
+ ResultErrorFields
+ }
+
+ // NumberOneOfError. ErrorDetails: -
+ NumberOneOfError struct {
+ ResultErrorFields
+ }
+
+ // NumberAllOfError. ErrorDetails: -
+ NumberAllOfError struct {
+ ResultErrorFields
+ }
+
+ // NumberNotError. ErrorDetails: -
+ NumberNotError struct {
+ ResultErrorFields
+ }
+
+ // MissingDependencyError. ErrorDetails: dependency
+ MissingDependencyError struct {
+ ResultErrorFields
+ }
+
+ // InternalError. ErrorDetails: error
+ InternalError struct {
+ ResultErrorFields
+ }
+
+ // EnumError. ErrorDetails: allowed
+ EnumError struct {
+ ResultErrorFields
+ }
+
+ // ArrayNoAdditionalItemsError. ErrorDetails: -
+ ArrayNoAdditionalItemsError struct {
+ ResultErrorFields
+ }
+
+ // ArrayMinItemsError. ErrorDetails: min
+ ArrayMinItemsError struct {
+ ResultErrorFields
+ }
+
+ // ArrayMaxItemsError. ErrorDetails: max
+ ArrayMaxItemsError struct {
+ ResultErrorFields
+ }
+
+ // ItemsMustBeUniqueError. ErrorDetails: type
+ ItemsMustBeUniqueError struct {
+ ResultErrorFields
+ }
+
+ // ArrayMinPropertiesError. ErrorDetails: min
+ ArrayMinPropertiesError struct {
+ ResultErrorFields
+ }
+
+ // ArrayMaxPropertiesError. ErrorDetails: max
+ ArrayMaxPropertiesError struct {
+ ResultErrorFields
+ }
+
+ // AdditionalPropertyNotAllowedError. ErrorDetails: property
+ AdditionalPropertyNotAllowedError struct {
+ ResultErrorFields
+ }
+
+ // InvalidPropertyPatternError. ErrorDetails: property, pattern
+ InvalidPropertyPatternError struct {
+ ResultErrorFields
+ }
+
+ // StringLengthGTEError. ErrorDetails: min
+ StringLengthGTEError struct {
+ ResultErrorFields
+ }
+
+ // StringLengthLTEError. ErrorDetails: max
+ StringLengthLTEError struct {
+ ResultErrorFields
+ }
+
+ // DoesNotMatchPatternError. ErrorDetails: pattern
+ DoesNotMatchPatternError struct {
+ ResultErrorFields
+ }
+
+ // DoesNotMatchFormatError. ErrorDetails: format
+ DoesNotMatchFormatError struct {
+ ResultErrorFields
+ }
+
+ // MultipleOfError. ErrorDetails: multiple
+ MultipleOfError struct {
+ ResultErrorFields
+ }
+
+ // NumberGTEError. ErrorDetails: min
+ NumberGTEError struct {
+ ResultErrorFields
+ }
+
+ // NumberGTError. ErrorDetails: min
+ NumberGTError struct {
+ ResultErrorFields
+ }
+
+ // NumberLTEError. ErrorDetails: max
+ NumberLTEError struct {
+ ResultErrorFields
+ }
+
+ // NumberLTError. ErrorDetails: max
+ NumberLTError struct {
+ ResultErrorFields
+ }
+)
+
+// newError takes a ResultError type and sets the type, context, description, details, value, and field
+func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) {
+ var t string
+ var d string
+ switch err.(type) {
+ case *RequiredError:
+ t = "required"
+ d = locale.Required()
+ case *InvalidTypeError:
+ t = "invalid_type"
+ d = locale.InvalidType()
+ case *NumberAnyOfError:
+ t = "number_any_of"
+ d = locale.NumberAnyOf()
+ case *NumberOneOfError:
+ t = "number_one_of"
+ d = locale.NumberOneOf()
+ case *NumberAllOfError:
+ t = "number_all_of"
+ d = locale.NumberAllOf()
+ case *NumberNotError:
+ t = "number_not"
+ d = locale.NumberNot()
+ case *MissingDependencyError:
+ t = "missing_dependency"
+ d = locale.MissingDependency()
+ case *InternalError:
+ t = "internal"
+ d = locale.Internal()
+ case *EnumError:
+ t = "enum"
+ d = locale.Enum()
+ case *ArrayNoAdditionalItemsError:
+ t = "array_no_additional_items"
+ d = locale.ArrayNoAdditionalItems()
+ case *ArrayMinItemsError:
+ t = "array_min_items"
+ d = locale.ArrayMinItems()
+ case *ArrayMaxItemsError:
+ t = "array_max_items"
+ d = locale.ArrayMaxItems()
+ case *ItemsMustBeUniqueError:
+ t = "unique"
+ d = locale.Unique()
+ case *ArrayMinPropertiesError:
+ t = "array_min_properties"
+ d = locale.ArrayMinProperties()
+ case *ArrayMaxPropertiesError:
+ t = "array_max_properties"
+ d = locale.ArrayMaxProperties()
+ case *AdditionalPropertyNotAllowedError:
+ t = "additional_property_not_allowed"
+ d = locale.AdditionalPropertyNotAllowed()
+ case *InvalidPropertyPatternError:
+ t = "invalid_property_pattern"
+ d = locale.InvalidPropertyPattern()
+ case *StringLengthGTEError:
+ t = "string_gte"
+ d = locale.StringGTE()
+ case *StringLengthLTEError:
+ t = "string_lte"
+ d = locale.StringLTE()
+ case *DoesNotMatchPatternError:
+ t = "pattern"
+ d = locale.DoesNotMatchPattern()
+ case *DoesNotMatchFormatError:
+ t = "format"
+ d = locale.DoesNotMatchFormat()
+ case *MultipleOfError:
+ t = "multiple_of"
+ d = locale.MultipleOf()
+ case *NumberGTEError:
+ t = "number_gte"
+ d = locale.NumberGTE()
+ case *NumberGTError:
+ t = "number_gt"
+ d = locale.NumberGT()
+ case *NumberLTEError:
+ t = "number_lte"
+ d = locale.NumberLTE()
+ case *NumberLTError:
+ t = "number_lt"
+ d = locale.NumberLT()
+ }
+
+ err.SetType(t)
+ err.SetContext(context)
+ err.SetValue(value)
+ err.SetDetails(details)
+ details["field"] = err.Field()
+
+ if _, exists := details["context"]; !exists && context != nil {
+ details["context"] = context.String()
+ }
+
+ err.SetDescription(formatErrorDescription(d, details))
+}
+
+// formatErrorDescription takes a string in the default text/template
+// format and converts it to a string with replacements. The fields come
+// from the ErrorDetails struct and vary for each type of error.
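+//
+// For example, formatErrorDescription("{{.field}} is required",
+// ErrorDetails{"field": "name"}) yields "name is required".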
+func formatErrorDescription(s string, details ErrorDetails) string {
+
+ var tpl *template.Template
+ var descrAsBuffer bytes.Buffer
+ var err error
+
+ errorTemplates.RLock()
+ tpl = errorTemplates.Lookup(s)
+ errorTemplates.RUnlock()
+
+ if tpl == nil {
+ errorTemplates.Lock()
+ tpl = errorTemplates.New(s)
+
+ if ErrorTemplateFuncs != nil {
+ tpl.Funcs(ErrorTemplateFuncs)
+ }
+
+ tpl, err = tpl.Parse(s)
+ errorTemplates.Unlock()
+
+ if err != nil {
+ return err.Error()
+ }
+ }
+
+ err = tpl.Execute(&descrAsBuffer, details)
+ if err != nil {
+ return err.Error()
+ }
+
+ return descrAsBuffer.String()
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
new file mode 100644
index 000000000..c6a07923b
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
@@ -0,0 +1,250 @@
+package gojsonschema
+
+import (
+ "net"
+ "net/url"
+ "regexp"
+ "strings"
+ "time"
+)
+
+type (
+ // FormatChecker is the interface all formatters added to FormatCheckerChain must implement
+ FormatChecker interface {
+ IsFormat(input interface{}) bool
+ }
+
+ // FormatCheckerChain holds the formatters
+ FormatCheckerChain struct {
+ formatters map[string]FormatChecker
+ }
+
+ // EmailFormatChecker verifies email address formats
+ EmailFormatChecker struct{}
+
+ // IPV4FormatChecker verifies IP addresses in the ipv4 format
+ IPV4FormatChecker struct{}
+
+ // IPV6FormatChecker verifies IP addresses in the ipv6 format
+ IPV6FormatChecker struct{}
+
+ // DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
+ //
+ // Valid formats:
+ // Partial Time: HH:MM:SS
+ // Full Date: YYYY-MM-DD
+ // Full Time: HH:MM:SSZ-07:00
+ // Date Time: YYYY-MM-DDTHH:MM:SSZ-07:00
+ //
+ // Where
+ // YYYY = 4DIGIT year
+ // MM = 2DIGIT month ; 01-12
+ // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+ // HH = 2DIGIT hour ; 00-23
+ // MM = 2DIGIT ; 00-59
+ // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+ // T = Literal
+ // Z = Literal
+ //
+ // Note: Nanoseconds are also supported in all formats
+ //
+ // http://tools.ietf.org/html/rfc3339#section-5.6
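+ //
+ // For example, "2006-01-02", "15:04:05" and
+ // "2006-01-02T15:04:05Z" are all accepted.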
+ DateTimeFormatChecker struct{}
+
+ // URIFormatChecker validates a URI with a valid Scheme per RFC3986
+ URIFormatChecker struct{}
+
+ // URIReferenceFormatChecker validates a URI or relative-reference per RFC3986
+ URIReferenceFormatChecker struct{}
+
+ // HostnameFormatChecker validates a hostname is in the correct format
+ HostnameFormatChecker struct{}
+
+ // UUIDFormatChecker validates a UUID is in the correct format
+ UUIDFormatChecker struct{}
+
+ // RegexFormatChecker validates a regex is in the correct format
+ RegexFormatChecker struct{}
+)
+
+var (
+ // FormatCheckers holds the valid formatters, and is a public variable
+ // so library users can add custom formatters
+ FormatCheckers = FormatCheckerChain{
+ formatters: map[string]FormatChecker{
+ "date-time": DateTimeFormatChecker{},
+ "hostname": HostnameFormatChecker{},
+ "email": EmailFormatChecker{},
+ "ipv4": IPV4FormatChecker{},
+ "ipv6": IPV6FormatChecker{},
+ "uri": URIFormatChecker{},
+ "uri-reference": URIReferenceFormatChecker{},
+ "uuid": UUIDFormatChecker{},
+ "regex": RegexFormatChecker{},
+ },
+ }
+
+ // Regex credit: https://github.com/asaskevich/govalidator
+ rxEmail = regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$")
+
+ // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
+ rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
+
+ rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
+)
+
+// Add adds a FormatChecker to the FormatCheckerChain
+// The name used will be the value used for the format key in your json schema
+func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
+ c.formatters[name] = f
+
+ return c
+}
+
+// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
+func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
+ delete(c.formatters, name)
+
+ return c
+}
+
+// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
+func (c *FormatCheckerChain) Has(name string) bool {
+ _, ok := c.formatters[name]
+
+ return ok
+}
+
+// IsFormat will check an input against a FormatChecker with the given name
+// to see if it is the correct format
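+// For example, FormatCheckers.IsFormat("date-time", "2006-01-02T15:04:05Z")
+// returns true with the default chain.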
+func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
+ f, ok := c.formatters[name]
+
+ if !ok {
+ return false
+ }
+
+ return f.IsFormat(input)
+}
+
+func (f EmailFormatChecker) IsFormat(input interface{}) bool {
+
+ asString, ok := input.(string)
+ if ok == false {
+ return false
+ }
+
+ return rxEmail.MatchString(asString)
+}
+
+// Credit: https://github.com/asaskevich/govalidator
+func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
+
+ asString, ok := input.(string)
+ if ok == false {
+ return false
+ }
+
+ ip := net.ParseIP(asString)
+ return ip != nil && strings.Contains(asString, ".")
+}
+
+// Credit: https://github.com/asaskevich/govalidator
+func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
+
+ asString, ok := input.(string)
+ if ok == false {
+ return false
+ }
+
+ ip := net.ParseIP(asString)
+ return ip != nil && strings.Contains(asString, ":")
+}
+
+func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
+
+ asString, ok := input.(string)
+ if ok == false {
+ return false
+ }
+
+ formats := []string{
+ "15:04:05",
+ "15:04:05Z07:00",
+ "2006-01-02",
+ time.RFC3339,
+ time.RFC3339Nano,
+ }
+
+ for _, format := range formats {
+ if _, err := time.Parse(format, asString); err == nil {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (f URIFormatChecker) IsFormat(input interface{}) bool {
+
+ asString, ok := input.(string)
+ if ok == false {
+ return false
+ }
+
+ u, err := url.Parse(asString)
+ if err != nil || u.Scheme == "" {
+ return false
+ }
+
+ return true
+}
+
+func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
+
+ asString, ok := input.(string)
+ if ok == false {
+ return false
+ }
+
+ _, err := url.Parse(asString)
+ return err == nil
+}
+
+func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
+
+ asString, ok := input.(string)
+ if ok == false {
+ return false
+ }
+
+ return rxHostname.MatchString(asString) && len(asString) < 256
+}
+
+func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
+
+ asString, ok := input.(string)
+ if ok == false {
+ return false
+ }
+
+ return rxUUID.MatchString(asString)
+}
+
+// IsFormat implements FormatChecker interface.
+func (f RegexFormatChecker) IsFormat(input interface{}) bool {
+
+ asString, ok := input.(string)
+ if ok == false {
+ return false
+ }
+
+ if asString == "" {
+ return true
+ }
+ _, err := regexp.Compile(asString)
+ if err != nil {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go
new file mode 100644
index 000000000..4ef7a8d03
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go
@@ -0,0 +1,37 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Very simple log wrapper.
+// Used for debugging/testing purposes.
+//
+// created 01-01-2015
+
+package gojsonschema
+
+import (
+ "log"
+)
+
+const internalLogEnabled = false
+
+func internalLog(format string, v ...interface{}) {
+ log.Printf(format, v...)
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go
new file mode 100644
index 000000000..fcc8d9d6f
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go
@@ -0,0 +1,72 @@
+// Copyright 2013 MongoDB, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author tolsen
+// author-github https://github.com/tolsen
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context
+//
+// created 04-09-2013
+
+package gojsonschema
+
+import "bytes"
+
+// jsonContext implements a persistent linked-list of strings
+type jsonContext struct {
+ head string
+ tail *jsonContext
+}
+
+func newJsonContext(head string, tail *jsonContext) *jsonContext {
+ return &jsonContext{head, tail}
+}
+
+// String displays the context in reverse.
+// This plays well with the data structure's persistent nature with
+// Cons and a json document's tree structure.
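+//
+// For example, newJsonContext("firstName", newJsonContext("(root)", nil)).String()
+// yields "(root).firstName".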
+func (c *jsonContext) String(del ...string) string {
+ byteArr := make([]byte, 0, c.stringLen())
+ buf := bytes.NewBuffer(byteArr)
+ c.writeStringToBuffer(buf, del)
+
+ return buf.String()
+}
+
+func (c *jsonContext) stringLen() int {
+ length := 0
+ if c.tail != nil {
+ length = c.tail.stringLen() + 1 // add 1 for "."
+ }
+
+ length += len(c.head)
+ return length
+}
+
+func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
+ if c.tail != nil {
+ c.tail.writeStringToBuffer(buf, del)
+
+ if len(del) > 0 {
+ buf.WriteString(del[0])
+ } else {
+ buf.WriteString(".")
+ }
+ }
+
+ buf.WriteString(c.head)
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go
new file mode 100644
index 000000000..a77a81e40
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go
@@ -0,0 +1,341 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Different strategies to load JSON files.
+// Includes References (file and HTTP), JSON strings and Go types.
+//
+// created 01-02-2015
+
+package gojsonschema
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "github.com/xeipuuv/gojsonreference"
+)
+
+var osFS = osFileSystem(os.Open)
+
+// JSON loader interface
+
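+// JSONLoader abstracts where a JSON document comes from and how it is
+// fetched and decoded.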
+type JSONLoader interface {
+ JsonSource() interface{}
+ LoadJSON() (interface{}, error)
+ JsonReference() (gojsonreference.JsonReference, error)
+ LoaderFactory() JSONLoaderFactory
+}
+
+type JSONLoaderFactory interface {
+ New(source string) JSONLoader
+}
+
+type DefaultJSONLoaderFactory struct {
+}
+
+type FileSystemJSONLoaderFactory struct {
+ fs http.FileSystem
+}
+
+func (d DefaultJSONLoaderFactory) New(source string) JSONLoader {
+ return &jsonReferenceLoader{
+ fs: osFS,
+ source: source,
+ }
+}
+
+func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader {
+ return &jsonReferenceLoader{
+ fs: f.fs,
+ source: source,
+ }
+}
+
+// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem.
+type osFileSystem func(string) (*os.File, error)
+
+func (o osFileSystem) Open(name string) (http.File, error) {
+ return o(name)
+}
+
+// JSON Reference loader
+// references are used to load JSONs from files and HTTP
+
+type jsonReferenceLoader struct {
+ fs http.FileSystem
+ source string
+}
+
+func (l *jsonReferenceLoader) JsonSource() interface{} {
+ return l.source
+}
+
+func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) {
+ return gojsonreference.NewJsonReference(l.JsonSource().(string))
+}
+
+func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory {
+ return &FileSystemJSONLoaderFactory{
+ fs: l.fs,
+ }
+}
+
+// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system.
+func NewReferenceLoader(source string) *jsonReferenceLoader {
+ return &jsonReferenceLoader{
+ fs: osFS,
+ source: source,
+ }
+}
+
+// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system.
+func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) *jsonReferenceLoader {
+ return &jsonReferenceLoader{
+ fs: fs,
+ source: source,
+ }
+}
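+
+// A minimal usage sketch (the referenced locations are hypothetical):
+//
+//	fileLoader := NewReferenceLoader("file:///home/me/schema.json")
+//	httpLoader := NewReferenceLoader("http://example.com/schema.json")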
+
+func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {
+
+ reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string))
+ if err != nil {
+ return nil, err
+ }
+
+ refToUrl := reference
+ refToUrl.GetUrl().Fragment = ""
+
+ var document interface{}
+
+ if reference.HasFileScheme {
+
+ filename := strings.Replace(refToUrl.GetUrl().Path, "file://", "", -1)
+ if runtime.GOOS == "windows" {
+ // on Windows, a file URL may have an extra leading slash, use slashes
+ // instead of backslashes, and have spaces escaped
+ if strings.HasPrefix(filename, "/") {
+ filename = filename[1:]
+ }
+ filename = filepath.FromSlash(filename)
+ }
+
+ document, err = l.loadFromFile(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ } else {
+
+ document, err = l.loadFromHTTP(refToUrl.String())
+ if err != nil {
+ return nil, err
+ }
+
+ }
+
+ return document, nil
+
+}
+
+func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {
+
+ resp, err := http.Get(address)
+ if err != nil {
+ return nil, err
+ }
+
+ // must return HTTP Status 200 OK
+ if resp.StatusCode != http.StatusOK {
+ return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status}))
+ }
+
+ bodyBuff, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))
+
+}
+
+func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {
+ f, err := l.fs.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ bodyBuff, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ return decodeJsonUsingNumber(bytes.NewReader(bodyBuff))
+
+}
+
+// JSON string loader
+
+type jsonStringLoader struct {
+ source string
+}
+
+func (l *jsonStringLoader) JsonSource() interface{} {
+ return l.source
+}
+
+func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) {
+ return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory {
+ return &DefaultJSONLoaderFactory{}
+}
+
+func NewStringLoader(source string) *jsonStringLoader {
+ return &jsonStringLoader{source: source}
+}
+
+func (l *jsonStringLoader) LoadJSON() (interface{}, error) {
+
+ return decodeJsonUsingNumber(strings.NewReader(l.JsonSource().(string)))
+
+}
+
+// JSON bytes loader
+
+type jsonBytesLoader struct {
+ source []byte
+}
+
+func (l *jsonBytesLoader) JsonSource() interface{} {
+ return l.source
+}
+
+func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) {
+ return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory {
+ return &DefaultJSONLoaderFactory{}
+}
+
+func NewBytesLoader(source []byte) *jsonBytesLoader {
+ return &jsonBytesLoader{source: source}
+}
+
+func (l *jsonBytesLoader) LoadJSON() (interface{}, error) {
+ return decodeJsonUsingNumber(bytes.NewReader(l.JsonSource().([]byte)))
+}
+
+// JSON Go (types) loader
+// used to load JSONs from the code as maps, interface{}, structs ...
+
+type jsonGoLoader struct {
+ source interface{}
+}
+
+func (l *jsonGoLoader) JsonSource() interface{} {
+ return l.source
+}
+
+func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) {
+ return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory {
+ return &DefaultJSONLoaderFactory{}
+}
+
+func NewGoLoader(source interface{}) *jsonGoLoader {
+ return &jsonGoLoader{source: source}
+}
+
+func (l *jsonGoLoader) LoadJSON() (interface{}, error) {
+
+ // convert it to compliant JSON first to avoid type mismatches
+
+ jsonBytes, err := json.Marshal(l.JsonSource())
+ if err != nil {
+ return nil, err
+ }
+
+ return decodeJsonUsingNumber(bytes.NewReader(jsonBytes))
+
+}
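+
+// In-memory loaders sketch: both of these describe the same schema document.
+//
+//	sl := NewStringLoader(`{"type": "string"}`)
+//	gl := NewGoLoader(map[string]interface{}{"type": "string"})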
+
+type jsonIOLoader struct {
+ buf *bytes.Buffer
+}
+
+func NewReaderLoader(source io.Reader) (*jsonIOLoader, io.Reader) {
+ buf := &bytes.Buffer{}
+ return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf)
+}
+
+func NewWriterLoader(source io.Writer) (*jsonIOLoader, io.Writer) {
+ buf := &bytes.Buffer{}
+ return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf)
+}
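+
+// NewReaderLoader and NewWriterLoader capture a copy of every byte that flows
+// through the returned reader/writer, so a document can be validated after it
+// has been streamed somewhere else. Sketch (body is a hypothetical io.Reader):
+//
+//	loader, tee := NewReaderLoader(body)
+//	// consume tee fully, then call loader.LoadJSON()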
+
+func (l *jsonIOLoader) JsonSource() interface{} {
+ return l.buf.String()
+}
+
+func (l *jsonIOLoader) LoadJSON() (interface{}, error) {
+ return decodeJsonUsingNumber(l.buf)
+}
+
+func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) {
+ return gojsonreference.NewJsonReference("#")
+}
+
+func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory {
+ return &DefaultJSONLoaderFactory{}
+}
+
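+// decodeJsonUsingNumber decodes with json.Decoder.UseNumber, so numeric values
+// are kept as json.Number instead of float64 and large integers do not lose
+// precision during validation.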
+func decodeJsonUsingNumber(r io.Reader) (interface{}, error) {
+
+ var document interface{}
+
+ decoder := json.NewDecoder(r)
+ decoder.UseNumber()
+
+ err := decoder.Decode(&document)
+ if err != nil {
+ return nil, err
+ }
+
+ return document, nil
+
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go
new file mode 100644
index 000000000..ee41484a7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/locales.go
@@ -0,0 +1,286 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Contains const string and messages.
+//
+// created 01-01-2015
+
+package gojsonschema
+
+type (
+ // locale is an interface for defining custom error strings
+ locale interface {
+ Required() string
+ InvalidType() string
+ NumberAnyOf() string
+ NumberOneOf() string
+ NumberAllOf() string
+ NumberNot() string
+ MissingDependency() string
+ Internal() string
+ Enum() string
+ ArrayNotEnoughItems() string
+ ArrayNoAdditionalItems() string
+ ArrayMinItems() string
+ ArrayMaxItems() string
+ Unique() string
+ ArrayMinProperties() string
+ ArrayMaxProperties() string
+ AdditionalPropertyNotAllowed() string
+ InvalidPropertyPattern() string
+ StringGTE() string
+ StringLTE() string
+ DoesNotMatchPattern() string
+ DoesNotMatchFormat() string
+ MultipleOf() string
+ NumberGTE() string
+ NumberGT() string
+ NumberLTE() string
+ NumberLT() string
+
+ // Schema validations
+ RegexPattern() string
+ GreaterThanZero() string
+ MustBeOfA() string
+ MustBeOfAn() string
+ CannotBeUsedWithout() string
+ CannotBeGT() string
+ MustBeOfType() string
+ MustBeValidRegex() string
+ MustBeValidFormat() string
+ MustBeGTEZero() string
+ KeyCannotBeGreaterThan() string
+ KeyItemsMustBeOfType() string
+ KeyItemsMustBeUnique() string
+ ReferenceMustBeCanonical() string
+ NotAValidType() string
+ Duplicated() string
+ HttpBadStatus() string
+ ParseError() string
+
+ // ErrorFormat
+ ErrorFormat() string
+ }
+
+ // DefaultLocale is the default locale for this package
+ DefaultLocale struct{}
+)
+
+func (l DefaultLocale) Required() string {
+ return `{{.property}} is required`
+}
+
+func (l DefaultLocale) InvalidType() string {
+ return `Invalid type. Expected: {{.expected}}, given: {{.given}}`
+}
+
+func (l DefaultLocale) NumberAnyOf() string {
+ return `Must validate at least one schema (anyOf)`
+}
+
+func (l DefaultLocale) NumberOneOf() string {
+ return `Must validate one and only one schema (oneOf)`
+}
+
+func (l DefaultLocale) NumberAllOf() string {
+ return `Must validate all the schemas (allOf)`
+}
+
+func (l DefaultLocale) NumberNot() string {
+ return `Must not validate the schema (not)`
+}
+
+func (l DefaultLocale) MissingDependency() string {
+ return `Has a dependency on {{.dependency}}`
+}
+
+func (l DefaultLocale) Internal() string {
+ return `Internal Error {{.error}}`
+}
+
+func (l DefaultLocale) Enum() string {
+ return `{{.field}} must be one of the following: {{.allowed}}`
+}
+
+func (l DefaultLocale) ArrayNoAdditionalItems() string {
+ return `No additional items allowed on array`
+}
+
+func (l DefaultLocale) ArrayNotEnoughItems() string {
+ return `Not enough items on array to match positional list of schema`
+}
+
+func (l DefaultLocale) ArrayMinItems() string {
+ return `Array must have at least {{.min}} items`
+}
+
+func (l DefaultLocale) ArrayMaxItems() string {
+ return `Array must have at most {{.max}} items`
+}
+
+func (l DefaultLocale) Unique() string {
+ return `{{.type}} items must be unique`
+}
+
+func (l DefaultLocale) ArrayMinProperties() string {
+ return `Must have at least {{.min}} properties`
+}
+
+func (l DefaultLocale) ArrayMaxProperties() string {
+ return `Must have at most {{.max}} properties`
+}
+
+func (l DefaultLocale) AdditionalPropertyNotAllowed() string {
+ return `Additional property {{.property}} is not allowed`
+}
+
+func (l DefaultLocale) InvalidPropertyPattern() string {
+ return `Property "{{.property}}" does not match pattern {{.pattern}}`
+}
+
+func (l DefaultLocale) StringGTE() string {
+ return `String length must be greater than or equal to {{.min}}`
+}
+
+func (l DefaultLocale) StringLTE() string {
+ return `String length must be less than or equal to {{.max}}`
+}
+
+func (l DefaultLocale) DoesNotMatchPattern() string {
+ return `Does not match pattern '{{.pattern}}'`
+}
+
+func (l DefaultLocale) DoesNotMatchFormat() string {
+ return `Does not match format '{{.format}}'`
+}
+
+func (l DefaultLocale) MultipleOf() string {
+ return `Must be a multiple of {{.multiple}}`
+}
+
+func (l DefaultLocale) NumberGTE() string {
+ return `Must be greater than or equal to {{.min}}`
+}
+
+func (l DefaultLocale) NumberGT() string {
+ return `Must be greater than {{.min}}`
+}
+
+func (l DefaultLocale) NumberLTE() string {
+ return `Must be less than or equal to {{.max}}`
+}
+
+func (l DefaultLocale) NumberLT() string {
+ return `Must be less than {{.max}}`
+}
+
+// Schema validators
+func (l DefaultLocale) RegexPattern() string {
+ return `Invalid regex pattern '{{.pattern}}'`
+}
+
+func (l DefaultLocale) GreaterThanZero() string {
+ return `{{.number}} must be strictly greater than 0`
+}
+
+func (l DefaultLocale) MustBeOfA() string {
+ return `{{.x}} must be of a {{.y}}`
+}
+
+func (l DefaultLocale) MustBeOfAn() string {
+ return `{{.x}} must be of an {{.y}}`
+}
+
+func (l DefaultLocale) CannotBeUsedWithout() string {
+ return `{{.x}} cannot be used without {{.y}}`
+}
+
+func (l DefaultLocale) CannotBeGT() string {
+ return `{{.x}} cannot be greater than {{.y}}`
+}
+
+func (l DefaultLocale) MustBeOfType() string {
+ return `{{.key}} must be of type {{.type}}`
+}
+
+func (l DefaultLocale) MustBeValidRegex() string {
+ return `{{.key}} must be a valid regex`
+}
+
+func (l DefaultLocale) MustBeValidFormat() string {
+ return `{{.key}} must be a valid format {{.given}}`
+}
+
+func (l DefaultLocale) MustBeGTEZero() string {
+ return `{{.key}} must be greater than or equal to 0`
+}
+
+func (l DefaultLocale) KeyCannotBeGreaterThan() string {
+ return `{{.key}} cannot be greater than {{.y}}`
+}
+
+func (l DefaultLocale) KeyItemsMustBeOfType() string {
+ return `{{.key}} items must be {{.type}}`
+}
+
+func (l DefaultLocale) KeyItemsMustBeUnique() string {
+ return `{{.key}} items must be unique`
+}
+
+func (l DefaultLocale) ReferenceMustBeCanonical() string {
+ return `Reference {{.reference}} must be canonical`
+}
+
+func (l DefaultLocale) NotAValidType() string {
+ return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}`
+}
+
+func (l DefaultLocale) Duplicated() string {
+ return `{{.type}} type is duplicated`
+}
+
+func (l DefaultLocale) HttpBadStatus() string {
+ return `Could not read schema from HTTP, response status is {{.status}}`
+}
+
+// Replacement options: field, description, context, value
+func (l DefaultLocale) ErrorFormat() string {
+ return `{{.field}}: {{.description}}`
+}
+
+// Parse error
+func (l DefaultLocale) ParseError() string {
+ return `Expected: %expected%, given: Invalid JSON`
+}
+
+const (
+ STRING_NUMBER = "number"
+ STRING_ARRAY_OF_STRINGS = "array of strings"
+ STRING_ARRAY_OF_SCHEMAS = "array of schemas"
+ STRING_SCHEMA = "schema"
+ STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings"
+ STRING_PROPERTIES = "properties"
+ STRING_DEPENDENCY = "dependency"
+ STRING_PROPERTY = "property"
+ STRING_UNDEFINED = "undefined"
+ STRING_CONTEXT_ROOT = "(root)"
+ STRING_ROOT_SCHEMA_PROPERTY = "(root)"
+)
diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go
new file mode 100644
index 000000000..6ad56ae86
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/result.go
@@ -0,0 +1,172 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Result and ResultError implementations.
+//
+// created 01-01-2015
+
+package gojsonschema
+
+import (
+ "fmt"
+ "strings"
+)
+
+type (
+ // ErrorDetails is a map of details specific to each error.
+ // While the values will vary, every error will contain a "field" value
+ ErrorDetails map[string]interface{}
+
+ // ResultError is the interface that library errors must implement
+ ResultError interface {
+ Field() string
+ SetType(string)
+ Type() string
+ SetContext(*jsonContext)
+ Context() *jsonContext
+ SetDescription(string)
+ Description() string
+ SetValue(interface{})
+ Value() interface{}
+ SetDetails(ErrorDetails)
+ Details() ErrorDetails
+ String() string
+ }
+
+ // ResultErrorFields holds the fields for each ResultError implementation.
+ // ResultErrorFields implements the ResultError interface, so custom errors
+ // can be defined by just embedding this type
+ ResultErrorFields struct {
+ errorType string // A string with the type of error (i.e. invalid_type)
+ context *jsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ...
+ description string // A human readable error message
+ value interface{} // Value given by the JSON file that is the source of the error
+ details ErrorDetails
+ }
+
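+ // Result is the outcome of validating a document against a schema.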
+ Result struct {
+ errors []ResultError
+ // Scores how well the validation matched. Useful in generating
+ // better error messages for anyOf and oneOf.
+ score int
+ }
+)
+
+// Field outputs the field name without the root context
+// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName
+func (v *ResultErrorFields) Field() string {
+ if p, ok := v.Details()["property"]; ok {
+ if str, isString := p.(string); isString {
+ return str
+ }
+ }
+
+ return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".")
+}
+
+func (v *ResultErrorFields) SetType(errorType string) {
+ v.errorType = errorType
+}
+
+func (v *ResultErrorFields) Type() string {
+ return v.errorType
+}
+
+func (v *ResultErrorFields) SetContext(context *jsonContext) {
+ v.context = context
+}
+
+func (v *ResultErrorFields) Context() *jsonContext {
+ return v.context
+}
+
+func (v *ResultErrorFields) SetDescription(description string) {
+ v.description = description
+}
+
+func (v *ResultErrorFields) Description() string {
+ return v.description
+}
+
+func (v *ResultErrorFields) SetValue(value interface{}) {
+ v.value = value
+}
+
+func (v *ResultErrorFields) Value() interface{} {
+ return v.value
+}
+
+func (v *ResultErrorFields) SetDetails(details ErrorDetails) {
+ v.details = details
+}
+
+func (v *ResultErrorFields) Details() ErrorDetails {
+ return v.details
+}
+
+func (v ResultErrorFields) String() string {
+ // as a fallback, the value is displayed Go style
+ valueString := fmt.Sprintf("%v", v.value)
+
+ // marshal the Go value to JSON
+ if v.value == nil {
+ valueString = TYPE_NULL
+ } else {
+ if vs, err := marshalToJsonString(v.value); err == nil {
+ if vs == nil {
+ valueString = TYPE_NULL
+ } else {
+ valueString = *vs
+ }
+ }
+ }
+
+ return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{
+ "context": v.context.String(),
+ "description": v.description,
+ "value": valueString,
+ "field": v.Field(),
+ })
+}
+
+func (v *Result) Valid() bool {
+ return len(v.errors) == 0
+}
+
+func (v *Result) Errors() []ResultError {
+ return v.errors
+}
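+
+// Callers typically check Valid() first and then walk Errors(), e.g.:
+//
+//	for _, e := range result.Errors() {
+//		fmt.Printf("%s: %s\n", e.Field(), e.Description())
+//	}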
+
+func (v *Result) addError(err ResultError, context *jsonContext, value interface{}, details ErrorDetails) {
+ newError(err, context, value, Locale, details)
+ v.errors = append(v.errors, err)
+ v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function
+}
+
+// Used to copy errors from a sub-schema to the main one
+func (v *Result) mergeErrors(otherResult *Result) {
+ v.errors = append(v.errors, otherResult.Errors()...)
+ v.score += otherResult.score
+}
+
+func (v *Result) incrementScore() {
+ v.score++
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go
new file mode 100644
index 000000000..2cac71e9b
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/schema.go
@@ -0,0 +1,928 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Defines Schema, the main entry to every subSchema.
+// Contains the parsing logic and error checking.
+//
+// created 26-02-2013
+
+package gojsonschema
+
+import (
+ // "encoding/json"
+ "errors"
+ "reflect"
+ "regexp"
+ "text/template"
+
+ "github.com/xeipuuv/gojsonreference"
+)
+
+var (
+ // Locale is the default locale to use
+ // Library users can overwrite with their own implementation
+ Locale locale = DefaultLocale{}
+
+ // ErrorTemplateFuncs allows you to define custom template funcs for use in localization.
+ ErrorTemplateFuncs template.FuncMap
+)
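+
+// A custom locale can be plugged in by embedding DefaultLocale and overriding
+// individual messages, then assigning it to Locale (sketch):
+//
+//	type terseLocale struct{ DefaultLocale }
+//	func (terseLocale) Required() string { return `{{.property}} missing` }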
+
+func NewSchema(l JSONLoader) (*Schema, error) {
+ ref, err := l.JsonReference()
+ if err != nil {
+ return nil, err
+ }
+
+ d := Schema{}
+ d.pool = newSchemaPool(l.LoaderFactory())
+ d.documentReference = ref
+ d.referencePool = newSchemaReferencePool()
+
+ var doc interface{}
+ if ref.String() != "" {
+ // Get document from schema pool
+ spd, err := d.pool.GetDocument(d.documentReference)
+ if err != nil {
+ return nil, err
+ }
+ doc = spd.Document
+ } else {
+ // Load JSON directly
+ doc, err = l.LoadJSON()
+ if err != nil {
+ return nil, err
+ }
+ d.pool.SetStandaloneDocument(doc)
+ }
+
+ err = d.parse(doc)
+ if err != nil {
+ return nil, err
+ }
+
+ return &d, nil
+}
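+
+// Typical flow (sketch; Validate lives elsewhere in this package):
+//
+//	schema, err := NewSchema(NewReferenceLoader("file:///tmp/schema.json"))
+//	// result, err := schema.Validate(NewStringLoader(documentJSON))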
+
+type Schema struct {
+ documentReference gojsonreference.JsonReference
+ rootSchema *subSchema
+ pool *schemaPool
+ referencePool *schemaReferencePool
+}
+
+func (d *Schema) parse(document interface{}) error {
+ d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY}
+ return d.parseSchema(document, d.rootSchema)
+}
+
+func (d *Schema) SetRootSchemaName(name string) {
+ d.rootSchema.property = name
+}
+
+// Parses a subSchema
+//
+// Pretty long function ( sorry :) )... but pretty straightforward, repetitive and boring
+// Not much magic involved here; most of the job is to validate the key names and their values,
+// then the values are copied into the subSchema struct
+//
+func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error {
+
+ if !isKind(documentNode, reflect.Map) {
+ return errors.New(formatErrorDescription(
+ Locale.ParseError(),
+ ErrorDetails{
+ "expected": STRING_SCHEMA,
+ },
+ ))
+ }
+
+ m := documentNode.(map[string]interface{})
+
+ if currentSchema == d.rootSchema {
+ currentSchema.ref = &d.documentReference
+ }
+
+ // $schema
+ if existsMapKey(m, KEY_SCHEMA) {
+ if !isKind(m[KEY_SCHEMA], reflect.String) {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING,
+ "given": KEY_SCHEMA,
+ },
+ ))
+ }
+ schemaRef := m[KEY_SCHEMA].(string)
+ schemaReference, err := gojsonreference.NewJsonReference(schemaRef)
+ if err != nil {
+ return err
+ }
+ currentSchema.subSchema = &schemaReference
+ }
+
+ // $ref
+ if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING,
+ "given": KEY_REF,
+ },
+ ))
+ }
+ if k, ok := m[KEY_REF].(string); ok {
+
+ jsonReference, err := gojsonreference.NewJsonReference(k)
+ if err != nil {
+ return err
+ }
+
+ if jsonReference.HasFullUrl {
+ currentSchema.ref = &jsonReference
+ } else {
+ inheritedReference, err := currentSchema.ref.Inherits(jsonReference)
+ if err != nil {
+ return err
+ }
+
+ currentSchema.ref = inheritedReference
+ }
+
+ if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok {
+ currentSchema.refSchema = sch
+
+ } else {
+ return d.parseReference(documentNode, currentSchema, k)
+ }
+ }
+
+ // definitions
+ if existsMapKey(m, KEY_DEFINITIONS) {
+ if isKind(m[KEY_DEFINITIONS], reflect.Map) {
+ currentSchema.definitions = make(map[string]*subSchema)
+ for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) {
+ if isKind(dv, reflect.Map) {
+ newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.definitions[dk] = newSchema
+ err := d.parseSchema(dv, newSchema)
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_ARRAY_OF_SCHEMAS,
+ "given": KEY_DEFINITIONS,
+ },
+ ))
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_ARRAY_OF_SCHEMAS,
+ "given": KEY_DEFINITIONS,
+ },
+ ))
+ }
+
+ }
+
+ // id
+ if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING,
+ "given": KEY_ID,
+ },
+ ))
+ }
+ if k, ok := m[KEY_ID].(string); ok {
+ currentSchema.id = &k
+ }
+
+ // title
+ if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING,
+ "given": KEY_TITLE,
+ },
+ ))
+ }
+ if k, ok := m[KEY_TITLE].(string); ok {
+ currentSchema.title = &k
+ }
+
+ // description
+ if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING,
+ "given": KEY_DESCRIPTION,
+ },
+ ))
+ }
+ if k, ok := m[KEY_DESCRIPTION].(string); ok {
+ currentSchema.description = &k
+ }
+
+ // type
+ if existsMapKey(m, KEY_TYPE) {
+ if isKind(m[KEY_TYPE], reflect.String) {
+ if k, ok := m[KEY_TYPE].(string); ok {
+ err := currentSchema.types.Add(k)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ if isKind(m[KEY_TYPE], reflect.Slice) {
+ arrayOfTypes := m[KEY_TYPE].([]interface{})
+ for _, typeInArray := range arrayOfTypes {
+ if reflect.ValueOf(typeInArray).Kind() != reflect.String {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
+ "given": KEY_TYPE,
+ },
+ ))
+ } else {
+ if err := currentSchema.types.Add(typeInArray.(string)); err != nil {
+ return err
+ }
+ }
+ }
+
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS,
+ "given": KEY_TYPE,
+ },
+ ))
+ }
+ }
+ }
+
+ // properties
+ if existsMapKey(m, KEY_PROPERTIES) {
+ err := d.parseProperties(m[KEY_PROPERTIES], currentSchema)
+ if err != nil {
+ return err
+ }
+ }
+
+ // additionalProperties
+ if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) {
+ if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) {
+ currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool)
+ } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) {
+ newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.additionalProperties = newSchema
+ err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema)
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
+ "given": KEY_ADDITIONAL_PROPERTIES,
+ },
+ ))
+ }
+ }
+
+ // patternProperties
+ if existsMapKey(m, KEY_PATTERN_PROPERTIES) {
+ if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) {
+ patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{})
+ if len(patternPropertiesMap) > 0 {
+ currentSchema.patternProperties = make(map[string]*subSchema)
+ for k, v := range patternPropertiesMap {
+ _, err := regexp.MatchString(k, "")
+ if err != nil {
+ return errors.New(formatErrorDescription(
+ Locale.RegexPattern(),
+ ErrorDetails{"pattern": k},
+ ))
+ }
+ newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
+ err = d.parseSchema(v, newSchema)
+ if err != nil {
+ return err
+ }
+ currentSchema.patternProperties[k] = newSchema
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_SCHEMA,
+ "given": KEY_PATTERN_PROPERTIES,
+ },
+ ))
+ }
+ }
+
+ // dependencies
+ if existsMapKey(m, KEY_DEPENDENCIES) {
+ err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema)
+ if err != nil {
+ return err
+ }
+ }
+
+ // items
+ if existsMapKey(m, KEY_ITEMS) {
+ if isKind(m[KEY_ITEMS], reflect.Slice) {
+ for _, itemElement := range m[KEY_ITEMS].([]interface{}) {
+ if isKind(itemElement, reflect.Map) {
+ newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+ newSchema.ref = currentSchema.ref
+ currentSchema.AddItemsChild(newSchema)
+ err := d.parseSchema(itemElement, newSchema)
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+ "given": KEY_ITEMS,
+ },
+ ))
+ }
+ currentSchema.itemsChildrenIsSingleSchema = false
+ }
+ } else if isKind(m[KEY_ITEMS], reflect.Map) {
+ newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS}
+ newSchema.ref = currentSchema.ref
+ currentSchema.AddItemsChild(newSchema)
+ err := d.parseSchema(m[KEY_ITEMS], newSchema)
+ if err != nil {
+ return err
+ }
+ currentSchema.itemsChildrenIsSingleSchema = true
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS,
+ "given": KEY_ITEMS,
+ },
+ ))
+ }
+ }
+
+ // additionalItems
+ if existsMapKey(m, KEY_ADDITIONAL_ITEMS) {
+ if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) {
+ currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool)
+ } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) {
+ newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.additionalItems = newSchema
+ err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema)
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA,
+ "given": KEY_ADDITIONAL_ITEMS,
+ },
+ ))
+ }
+ }
+
+ // validation : number / integer
+
+ if existsMapKey(m, KEY_MULTIPLE_OF) {
+ multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF])
+ if multipleOfValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.InvalidType(),
+ ErrorDetails{
+ "expected": STRING_NUMBER,
+ "given": KEY_MULTIPLE_OF,
+ },
+ ))
+ }
+ if *multipleOfValue <= 0 {
+ return errors.New(formatErrorDescription(
+ Locale.GreaterThanZero(),
+ ErrorDetails{"number": KEY_MULTIPLE_OF},
+ ))
+ }
+ currentSchema.multipleOf = multipleOfValue
+ }
+
+ if existsMapKey(m, KEY_MINIMUM) {
+ minimumValue := mustBeNumber(m[KEY_MINIMUM])
+ if minimumValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfA(),
+ ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER},
+ ))
+ }
+ currentSchema.minimum = minimumValue
+ }
+
+ if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) {
+ if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) {
+ if currentSchema.minimum == nil {
+ return errors.New(formatErrorDescription(
+ Locale.CannotBeUsedWithout(),
+ ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM},
+ ))
+ }
+ exclusiveMinimumValue := m[KEY_EXCLUSIVE_MINIMUM].(bool)
+ currentSchema.exclusiveMinimum = exclusiveMinimumValue
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfA(),
+ ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": TYPE_BOOLEAN},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_MAXIMUM) {
+ maximumValue := mustBeNumber(m[KEY_MAXIMUM])
+ if maximumValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfA(),
+ ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER},
+ ))
+ }
+ currentSchema.maximum = maximumValue
+ }
+
+ if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) {
+ if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) {
+ if currentSchema.maximum == nil {
+ return errors.New(formatErrorDescription(
+ Locale.CannotBeUsedWithout(),
+ ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM},
+ ))
+ }
+ exclusiveMaximumValue := m[KEY_EXCLUSIVE_MAXIMUM].(bool)
+ currentSchema.exclusiveMaximum = exclusiveMaximumValue
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfA(),
+ ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": STRING_NUMBER},
+ ))
+ }
+ }
+
+ if currentSchema.minimum != nil && currentSchema.maximum != nil {
+ if *currentSchema.minimum > *currentSchema.maximum {
+ return errors.New(formatErrorDescription(
+ Locale.CannotBeGT(),
+ ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM},
+ ))
+ }
+ }
+
+ // validation : string
+
+ if existsMapKey(m, KEY_MIN_LENGTH) {
+ minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH])
+ if minLengthIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER},
+ ))
+ }
+ if *minLengthIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MIN_LENGTH},
+ ))
+ }
+ currentSchema.minLength = minLengthIntegerValue
+ }
+
+ if existsMapKey(m, KEY_MAX_LENGTH) {
+ maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH])
+ if maxLengthIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER},
+ ))
+ }
+ if *maxLengthIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MAX_LENGTH},
+ ))
+ }
+ currentSchema.maxLength = maxLengthIntegerValue
+ }
+
+ if currentSchema.minLength != nil && currentSchema.maxLength != nil {
+ if *currentSchema.minLength > *currentSchema.maxLength {
+ return errors.New(formatErrorDescription(
+ Locale.CannotBeGT(),
+ ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_PATTERN) {
+ if isKind(m[KEY_PATTERN], reflect.String) {
+ regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string))
+ if err != nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeValidRegex(),
+ ErrorDetails{"key": KEY_PATTERN},
+ ))
+ }
+ currentSchema.pattern = regexpObject
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfA(),
+ ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_FORMAT) {
+ formatString, ok := m[KEY_FORMAT].(string)
+ if ok && FormatCheckers.Has(formatString) {
+ currentSchema.format = formatString
+ }
+ }
+
+ // validation : object
+
+ if existsMapKey(m, KEY_MIN_PROPERTIES) {
+ minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES])
+ if minPropertiesIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER},
+ ))
+ }
+ if *minPropertiesIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MIN_PROPERTIES},
+ ))
+ }
+ currentSchema.minProperties = minPropertiesIntegerValue
+ }
+
+ if existsMapKey(m, KEY_MAX_PROPERTIES) {
+ maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES])
+ if maxPropertiesIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER},
+ ))
+ }
+ if *maxPropertiesIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MAX_PROPERTIES},
+ ))
+ }
+ currentSchema.maxProperties = maxPropertiesIntegerValue
+ }
+
+ if currentSchema.minProperties != nil && currentSchema.maxProperties != nil {
+ if *currentSchema.minProperties > *currentSchema.maxProperties {
+ return errors.New(formatErrorDescription(
+ Locale.KeyCannotBeGreaterThan(),
+ ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_REQUIRED) {
+ if isKind(m[KEY_REQUIRED], reflect.Slice) {
+ requiredValues := m[KEY_REQUIRED].([]interface{})
+ for _, requiredValue := range requiredValues {
+ if isKind(requiredValue, reflect.String) {
+ err := currentSchema.AddRequired(requiredValue.(string))
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.KeyItemsMustBeOfType(),
+ ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING},
+ ))
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY},
+ ))
+ }
+ }
+
+ // validation : array
+
+ if existsMapKey(m, KEY_MIN_ITEMS) {
+ minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS])
+ if minItemsIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER},
+ ))
+ }
+ if *minItemsIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MIN_ITEMS},
+ ))
+ }
+ currentSchema.minItems = minItemsIntegerValue
+ }
+
+ if existsMapKey(m, KEY_MAX_ITEMS) {
+ maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS])
+ if maxItemsIntegerValue == nil {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER},
+ ))
+ }
+ if *maxItemsIntegerValue < 0 {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeGTEZero(),
+ ErrorDetails{"key": KEY_MAX_ITEMS},
+ ))
+ }
+ currentSchema.maxItems = maxItemsIntegerValue
+ }
+
+ if existsMapKey(m, KEY_UNIQUE_ITEMS) {
+ if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) {
+ currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool)
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfA(),
+ ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN},
+ ))
+ }
+ }
+
+ // validation : all
+
+ if existsMapKey(m, KEY_ENUM) {
+ if isKind(m[KEY_ENUM], reflect.Slice) {
+ for _, v := range m[KEY_ENUM].([]interface{}) {
+ err := currentSchema.AddEnum(v)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY},
+ ))
+ }
+ }
+
+ // validation : subSchema
+
+ if existsMapKey(m, KEY_ONE_OF) {
+ if isKind(m[KEY_ONE_OF], reflect.Slice) {
+ for _, v := range m[KEY_ONE_OF].([]interface{}) {
+ newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.AddOneOf(newSchema)
+ err := d.parseSchema(v, newSchema)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_ANY_OF) {
+ if isKind(m[KEY_ANY_OF], reflect.Slice) {
+ for _, v := range m[KEY_ANY_OF].([]interface{}) {
+ newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.AddAnyOf(newSchema)
+ err := d.parseSchema(v, newSchema)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_ALL_OF) {
+ if isKind(m[KEY_ALL_OF], reflect.Slice) {
+ for _, v := range m[KEY_ALL_OF].([]interface{}) {
+ newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.AddAllOf(newSchema)
+ err := d.parseSchema(v, newSchema)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_ALL_OF, "y": TYPE_ARRAY},
+ ))
+ }
+ }
+
+ if existsMapKey(m, KEY_NOT) {
+ if isKind(m[KEY_NOT], reflect.Map) {
+ newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.SetNot(newSchema)
+ err := d.parseSchema(m[KEY_NOT], newSchema)
+ if err != nil {
+ return err
+ }
+ } else {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfAn(),
+ ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT},
+ ))
+ }
+ }
+
+ return nil
+}
+
+func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) error {
+ var refdDocumentNode interface{}
+ jsonPointer := currentSchema.ref.GetPointer()
+ standaloneDocument := d.pool.GetStandaloneDocument()
+
+ if standaloneDocument != nil {
+
+ var err error
+ refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument)
+ if err != nil {
+ return err
+ }
+
+ } else {
+ dsp, err := d.pool.GetDocument(*currentSchema.ref)
+ if err != nil {
+ return err
+ }
+
+ refdDocumentNode, _, err = jsonPointer.Get(dsp.Document)
+ if err != nil {
+ return err
+ }
+
+ }
+
+ if !isKind(refdDocumentNode, reflect.Map) {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT},
+ ))
+ }
+
+ // returns the loaded referenced subSchema for the caller to update its current subSchema
+ newSchemaDocument := refdDocumentNode.(map[string]interface{})
+ newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref}
+ d.referencePool.Add(currentSchema.ref.String()+reference, newSchema)
+
+ err := d.parseSchema(newSchemaDocument, newSchema)
+ if err != nil {
+ return err
+ }
+
+ currentSchema.refSchema = newSchema
+
+ return nil
+
+}
+
+func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error {
+
+ if !isKind(documentNode, reflect.Map) {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT},
+ ))
+ }
+
+ m := documentNode.(map[string]interface{})
+ for k := range m {
+ schemaProperty := k
+ newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref}
+ currentSchema.AddPropertiesChild(newSchema)
+ err := d.parseSchema(m[k], newSchema)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error {
+
+ if !isKind(documentNode, reflect.Map) {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT},
+ ))
+ }
+
+ m := documentNode.(map[string]interface{})
+ currentSchema.dependencies = make(map[string]interface{})
+
+ for k := range m {
+ switch reflect.ValueOf(m[k]).Kind() {
+
+ case reflect.Slice:
+ values := m[k].([]interface{})
+ var valuesToRegister []string
+
+ for _, value := range values {
+ if !isKind(value, reflect.String) {
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{
+ "key": STRING_DEPENDENCY,
+ "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
+ },
+ ))
+ } else {
+ valuesToRegister = append(valuesToRegister, value.(string))
+ }
+ }
+ currentSchema.dependencies[k] = valuesToRegister
+
+ case reflect.Map:
+ depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref}
+ err := d.parseSchema(m[k], depSchema)
+ if err != nil {
+ return err
+ }
+ currentSchema.dependencies[k] = depSchema
+
+ default:
+ return errors.New(formatErrorDescription(
+ Locale.MustBeOfType(),
+ ErrorDetails{
+ "key": STRING_DEPENDENCY,
+ "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS,
+ },
+ ))
+ }
+
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go
new file mode 100644
index 000000000..f2ad641af
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go
@@ -0,0 +1,109 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Defines resources pooling.
+// Eases referencing and avoids downloading the same resource twice.
+//
+// created 26-02-2013
+
+package gojsonschema
+
+import (
+ "errors"
+
+ "github.com/xeipuuv/gojsonreference"
+)
+
+type schemaPoolDocument struct {
+ Document interface{}
+}
+
+type schemaPool struct {
+ schemaPoolDocuments map[string]*schemaPoolDocument
+ standaloneDocument interface{}
+ jsonLoaderFactory JSONLoaderFactory
+}
+
+func newSchemaPool(f JSONLoaderFactory) *schemaPool {
+
+ p := &schemaPool{}
+ p.schemaPoolDocuments = make(map[string]*schemaPoolDocument)
+ p.standaloneDocument = nil
+ p.jsonLoaderFactory = f
+
+ return p
+}
+
+func (p *schemaPool) SetStandaloneDocument(document interface{}) {
+ p.standaloneDocument = document
+}
+
+func (p *schemaPool) GetStandaloneDocument() (document interface{}) {
+ return p.standaloneDocument
+}
+
+func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) {
+
+ if internalLogEnabled {
+ internalLog("Get Document ( %s )", reference.String())
+ }
+
+ // It is not possible to load anything that is not canonical...
+ if !reference.IsCanonical() {
+ return nil, errors.New(formatErrorDescription(
+ Locale.ReferenceMustBeCanonical(),
+ ErrorDetails{"reference": reference},
+ ))
+ }
+
+ refToUrl := reference
+ refToUrl.GetUrl().Fragment = ""
+
+ // Try to find the requested document in the pool
+ if spd, ok := p.schemaPoolDocuments[refToUrl.String()]; ok {
+ if internalLogEnabled {
+ internalLog(" From pool")
+ }
+ return spd, nil
+ }
+
+ jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String())
+ document, err := jsonReferenceLoader.LoadJSON()
+ if err != nil {
+ return nil, err
+ }
+
+ spd := &schemaPoolDocument{Document: document}
+ // add the document to the pool for potential later use
+ p.schemaPoolDocuments[refToUrl.String()] = spd
+
+ return spd, nil
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go
new file mode 100644
index 000000000..294e36a73
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go
@@ -0,0 +1,67 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Pool of referenced schemas.
+//
+// created 25-06-2013
+
+package gojsonschema
+
+type schemaReferencePool struct {
+ documents map[string]*subSchema
+}
+
+func newSchemaReferencePool() *schemaReferencePool {
+
+ p := &schemaReferencePool{}
+ p.documents = make(map[string]*subSchema)
+
+ return p
+}
+
+func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) {
+
+ if internalLogEnabled {
+ internalLog("Schema Reference ( %s )", ref)
+ }
+
+ if sch, ok := p.documents[ref]; ok {
+ if internalLogEnabled {
+ internalLog(" From pool")
+ }
+ return sch, true
+ }
+
+ return nil, false
+}
+
+func (p *schemaReferencePool) Add(ref string, sch *subSchema) {
+
+ if internalLogEnabled {
+ internalLog("Add Schema Reference %s to pool", ref)
+ }
+
+ p.documents[ref] = sch
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go
new file mode 100644
index 000000000..36b447a29
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go
@@ -0,0 +1,83 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Helper structure to handle schema types, and the combination of them.
+//
+// created 28-02-2013
+
+package gojsonschema
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+type jsonSchemaType struct {
+ types []string
+}
+
+// IsTyped reports whether the schema is typed, i.e. contains at least one type.
+// When not typed, the schema does not need any type validation
+func (t *jsonSchemaType) IsTyped() bool {
+ return len(t.types) > 0
+}
+
+func (t *jsonSchemaType) Add(etype string) error {
+
+ if !isStringInSlice(JSON_TYPES, etype) {
+ return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES}))
+ }
+
+ if t.Contains(etype) {
+ return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype}))
+ }
+
+ t.types = append(t.types, etype)
+
+ return nil
+}
+
+func (t *jsonSchemaType) Contains(etype string) bool {
+
+ for _, v := range t.types {
+ if v == etype {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (t *jsonSchemaType) String() string {
+
+ if len(t.types) == 0 {
+ return STRING_UNDEFINED // should never happen
+ }
+
+ // Displayed as a list [type1,type2,...]
+ if len(t.types) > 1 {
+ return fmt.Sprintf("[%s]", strings.Join(t.types, ","))
+ }
+
+ // Only one type: name only
+ return t.types[0]
+}
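+
+// For example, after Add("string") and Add("integer"), String() returns
+// "[string,integer]"; calling Add("integer") again returns a Duplicated error.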
diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go
new file mode 100644
index 000000000..9ddbb5fc1
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go
@@ -0,0 +1,227 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Defines the structure of a sub-subSchema.
+// A sub-subSchema can contain other sub-schemas.
+//
+// created 27-02-2013
+
+package gojsonschema
+
+import (
+ "errors"
+ "regexp"
+ "strings"
+
+ "github.com/xeipuuv/gojsonreference"
+)
+
+const (
+ KEY_SCHEMA = "$schema"
+ KEY_ID = "$id"
+ KEY_REF = "$ref"
+ KEY_TITLE = "title"
+ KEY_DESCRIPTION = "description"
+ KEY_TYPE = "type"
+ KEY_ITEMS = "items"
+ KEY_ADDITIONAL_ITEMS = "additionalItems"
+ KEY_PROPERTIES = "properties"
+ KEY_PATTERN_PROPERTIES = "patternProperties"
+ KEY_ADDITIONAL_PROPERTIES = "additionalProperties"
+ KEY_DEFINITIONS = "definitions"
+ KEY_MULTIPLE_OF = "multipleOf"
+ KEY_MINIMUM = "minimum"
+ KEY_MAXIMUM = "maximum"
+ KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum"
+ KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum"
+ KEY_MIN_LENGTH = "minLength"
+ KEY_MAX_LENGTH = "maxLength"
+ KEY_PATTERN = "pattern"
+ KEY_FORMAT = "format"
+ KEY_MIN_PROPERTIES = "minProperties"
+ KEY_MAX_PROPERTIES = "maxProperties"
+ KEY_DEPENDENCIES = "dependencies"
+ KEY_REQUIRED = "required"
+ KEY_MIN_ITEMS = "minItems"
+ KEY_MAX_ITEMS = "maxItems"
+ KEY_UNIQUE_ITEMS = "uniqueItems"
+ KEY_ENUM = "enum"
+ KEY_ONE_OF = "oneOf"
+ KEY_ANY_OF = "anyOf"
+ KEY_ALL_OF = "allOf"
+ KEY_NOT = "not"
+)
+
+type subSchema struct {
+
+ // basic subSchema meta properties
+ id *string
+ title *string
+ description *string
+
+ property string
+
+ // Types associated with the subSchema
+ types jsonSchemaType
+
+ // Reference url
+ ref *gojsonreference.JsonReference
+ // Schema referenced
+ refSchema *subSchema
+ // Json reference
+ subSchema *gojsonreference.JsonReference
+
+ // hierarchy
+ parent *subSchema
+ definitions map[string]*subSchema
+ definitionsChildren []*subSchema
+ itemsChildren []*subSchema
+ itemsChildrenIsSingleSchema bool
+ propertiesChildren []*subSchema
+
+ // validation : number / integer
+ multipleOf *float64
+ maximum *float64
+ exclusiveMaximum bool
+ minimum *float64
+ exclusiveMinimum bool
+
+ // validation : string
+ minLength *int
+ maxLength *int
+ pattern *regexp.Regexp
+ format string
+
+ // validation : object
+ minProperties *int
+ maxProperties *int
+ required []string
+
+ dependencies map[string]interface{}
+ additionalProperties interface{}
+ patternProperties map[string]*subSchema
+
+ // validation : array
+ minItems *int
+ maxItems *int
+ uniqueItems bool
+
+ additionalItems interface{}
+
+ // validation : all
+ enum []string
+
+ // validation : subSchema
+ oneOf []*subSchema
+ anyOf []*subSchema
+ allOf []*subSchema
+ not *subSchema
+}
+
+func (s *subSchema) AddEnum(i interface{}) error {
+
+ is, err := marshalToJsonString(i)
+ if err != nil {
+ return err
+ }
+
+ if isStringInSlice(s.enum, *is) {
+ return errors.New(formatErrorDescription(
+ Locale.KeyItemsMustBeUnique(),
+ ErrorDetails{"key": KEY_ENUM},
+ ))
+ }
+
+ s.enum = append(s.enum, *is)
+
+ return nil
+}
+
+func (s *subSchema) ContainsEnum(i interface{}) (bool, error) {
+
+ is, err := marshalToJsonString(i)
+ if err != nil {
+ return false, err
+ }
+
+ return isStringInSlice(s.enum, *is), nil
+}
+
+func (s *subSchema) AddOneOf(subSchema *subSchema) {
+ s.oneOf = append(s.oneOf, subSchema)
+}
+
+func (s *subSchema) AddAllOf(subSchema *subSchema) {
+ s.allOf = append(s.allOf, subSchema)
+}
+
+func (s *subSchema) AddAnyOf(subSchema *subSchema) {
+ s.anyOf = append(s.anyOf, subSchema)
+}
+
+func (s *subSchema) SetNot(subSchema *subSchema) {
+ s.not = subSchema
+}
+
+func (s *subSchema) AddRequired(value string) error {
+
+ if isStringInSlice(s.required, value) {
+ return errors.New(formatErrorDescription(
+ Locale.KeyItemsMustBeUnique(),
+ ErrorDetails{"key": KEY_REQUIRED},
+ ))
+ }
+
+ s.required = append(s.required, value)
+
+ return nil
+}
+
+func (s *subSchema) AddDefinitionChild(child *subSchema) {
+ s.definitionsChildren = append(s.definitionsChildren, child)
+}
+
+func (s *subSchema) AddItemsChild(child *subSchema) {
+ s.itemsChildren = append(s.itemsChildren, child)
+}
+
+func (s *subSchema) AddPropertiesChild(child *subSchema) {
+ s.propertiesChildren = append(s.propertiesChildren, child)
+}
+
+func (s *subSchema) PatternPropertiesString() string {
+
+ if len(s.patternProperties) == 0 {
+ return STRING_UNDEFINED // should never happen
+ }
+
+ patternPropertiesKeySlice := []string{}
+ for pk := range s.patternProperties {
+ patternPropertiesKeySlice = append(patternPropertiesKeySlice, `"`+pk+`"`)
+ }
+
+ if len(patternPropertiesKeySlice) == 1 {
+ return patternPropertiesKeySlice[0]
+ }
+
+ return "[" + strings.Join(patternPropertiesKeySlice, ",") + "]"
+
+}
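AddEnum and ContainsEnum above compare enum candidates by their JSON encodings rather than with reflect.DeepEqual. A minimal standalone sketch of that comparison trick, assuming nothing beyond encoding/json (the helper name canonical is illustrative, not part of the library):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // canonical mirrors marshalToJsonString: any Go value is reduced to its
    // JSON encoding so that values of different shapes compare as strings.
    func canonical(v interface{}) string {
        b, err := json.Marshal(v)
        if err != nil {
            panic(err) // sketch only; the library propagates this error
        }
        return string(b)
    }

    func main() {
        a := canonical(map[string]interface{}{"x": 1, "y": 2})
        b := canonical(map[string]interface{}{"y": 2, "x": 1})
        fmt.Println(a == b) // true: encoding/json sorts object keys
    }

Because encoding/json sorts map keys, equal objects reliably encode to the same string, which is what makes the string-slice membership test in AddEnum sound.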
diff --git a/vendor/github.com/xeipuuv/gojsonschema/types.go b/vendor/github.com/xeipuuv/gojsonschema/types.go
new file mode 100644
index 000000000..952d22ef6
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/types.go
@@ -0,0 +1,58 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Contains const types for schema and JSON.
+//
+// created 28-02-2013
+
+package gojsonschema
+
+const (
+ TYPE_ARRAY = `array`
+ TYPE_BOOLEAN = `boolean`
+ TYPE_INTEGER = `integer`
+ TYPE_NUMBER = `number`
+ TYPE_NULL = `null`
+ TYPE_OBJECT = `object`
+ TYPE_STRING = `string`
+)
+
+var JSON_TYPES []string
+var SCHEMA_TYPES []string
+
+func init() {
+ JSON_TYPES = []string{
+ TYPE_ARRAY,
+ TYPE_BOOLEAN,
+ TYPE_INTEGER,
+ TYPE_NUMBER,
+ TYPE_NULL,
+ TYPE_OBJECT,
+ TYPE_STRING}
+
+ SCHEMA_TYPES = []string{
+ TYPE_ARRAY,
+ TYPE_BOOLEAN,
+ TYPE_INTEGER,
+ TYPE_NUMBER,
+ TYPE_OBJECT,
+ TYPE_STRING}
+}
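JSON_TYPES and SCHEMA_TYPES differ by exactly one entry: TYPE_NULL is a legal type for document values but, in this vendored revision, is left out of the set accepted for a schema's "type" keyword. A quick sketch of the distinction, with local slices standing in for the exported globals:

    package main

    import "fmt"

    func contains(list []string, s string) bool {
        for _, v := range list {
            if v == s {
                return true
            }
        }
        return false
    }

    func main() {
        jsonTypes := []string{"array", "boolean", "integer", "number", "null", "object", "string"}
        schemaTypes := []string{"array", "boolean", "integer", "number", "object", "string"}

        fmt.Println(contains(jsonTypes, "null"))   // true: null is a JSON value type
        fmt.Println(contains(schemaTypes, "null")) // false: not accepted in "type" here
    }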
diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go
new file mode 100644
index 000000000..26cf75ebf
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/utils.go
@@ -0,0 +1,208 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Various utility functions.
+//
+// created 26-02-2013
+
+package gojsonschema
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+)
+
+func isKind(what interface{}, kind reflect.Kind) bool {
+ target := what
+ if isJsonNumber(what) {
+ // JSON Numbers are strings!
+ target = *mustBeNumber(what)
+ }
+ return reflect.ValueOf(target).Kind() == kind
+}
+
+func existsMapKey(m map[string]interface{}, k string) bool {
+ _, ok := m[k]
+ return ok
+}
+
+func isStringInSlice(s []string, what string) bool {
+ for i := range s {
+ if s[i] == what {
+ return true
+ }
+ }
+ return false
+}
+
+func marshalToJsonString(value interface{}) (*string, error) {
+
+ mBytes, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+
+ sBytes := string(mBytes)
+ return &sBytes, nil
+}
+
+func isJsonNumber(what interface{}) bool {
+
+ switch what.(type) {
+
+ case json.Number:
+ return true
+ }
+
+ return false
+}
+
+func checkJsonNumber(what interface{}) (isValidFloat64 bool, isValidInt64 bool, isValidInt32 bool) {
+
+ jsonNumber := what.(json.Number)
+
+ f64, errFloat64 := jsonNumber.Float64()
+ s64 := strconv.FormatFloat(f64, 'f', -1, 64)
+ _, errInt64 := strconv.ParseInt(s64, 10, 64)
+
+ isValidFloat64 = errFloat64 == nil
+ isValidInt64 = errInt64 == nil
+
+ _, errInt32 := strconv.ParseInt(s64, 10, 32)
+ isValidInt32 = isValidInt64 && errInt32 == nil
+
+ return
+
+}
+
+// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
+const (
+ max_json_float = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1
+ min_json_float = -float64(1<<53 - 1) // -9007199254740991.0 -(2^53 - 1)
+)
+
+func isFloat64AnInteger(f float64) bool {
+
+ if math.IsNaN(f) || math.IsInf(f, 0) || f < min_json_float || f > max_json_float {
+ return false
+ }
+
+ return f == float64(int64(f)) || f == float64(uint64(f))
+}
+
+func mustBeInteger(what interface{}) *int {
+
+ if isJsonNumber(what) {
+
+ number := what.(json.Number)
+
+ _, _, isValidInt32 := checkJsonNumber(number)
+
+ if isValidInt32 {
+
+ int64Value, err := number.Int64()
+ if err != nil {
+ return nil
+ }
+
+ int32Value := int(int64Value)
+ return &int32Value
+
+ } else {
+ return nil
+ }
+
+ }
+
+ return nil
+}
+
+func mustBeNumber(what interface{}) *float64 {
+
+ if isJsonNumber(what) {
+
+ number := what.(json.Number)
+ float64Value, err := number.Float64()
+
+ if err == nil {
+ return &float64Value
+ } else {
+ return nil
+ }
+
+ }
+
+ return nil
+
+}
+
+// formats a number so that it is displayed as the smallest string possible
+func resultErrorFormatJsonNumber(n json.Number) string {
+
+ if int64Value, err := n.Int64(); err == nil {
+ return fmt.Sprintf("%d", int64Value)
+ }
+
+ float64Value, _ := n.Float64()
+
+ return fmt.Sprintf("%g", float64Value)
+}
+
+// formats a number so that it is displayed as the smallest string possible
+func resultErrorFormatNumber(n float64) string {
+
+ if isFloat64AnInteger(n) {
+ return fmt.Sprintf("%d", int64(n))
+ }
+
+ return fmt.Sprintf("%g", n)
+}
+
+func convertDocumentNode(val interface{}) interface{} {
+
+ if lval, ok := val.([]interface{}); ok {
+
+ res := []interface{}{}
+ for _, v := range lval {
+ res = append(res, convertDocumentNode(v))
+ }
+
+ return res
+
+ }
+
+ if mval, ok := val.(map[interface{}]interface{}); ok {
+
+ res := map[string]interface{}{}
+
+ for k, v := range mval {
+ res[k.(string)] = convertDocumentNode(v)
+ }
+
+ return res
+
+ }
+
+ return val
+}
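checkJsonNumber, mustBeInteger, and mustBeNumber only ever see json.Number values, which requires the document to have been decoded with json.Decoder.UseNumber(); the library's loaders, which are not part of this diff, are assumed to do exactly that. A minimal sketch of why that matters for large integers:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "strconv"
    )

    func main() {
        // 2^53+1 cannot be represented exactly as a float64, so the default
        // decoding would silently round it; UseNumber keeps the digits intact.
        dec := json.NewDecoder(bytes.NewReader([]byte(`{"n": 9007199254740993}`)))
        dec.UseNumber()

        var doc map[string]interface{}
        if err := dec.Decode(&doc); err != nil {
            panic(err)
        }

        n := doc["n"].(json.Number)
        i, err := strconv.ParseInt(n.String(), 10, 64)
        fmt.Println(i, err == nil) // 9007199254740993 true
    }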
diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go
new file mode 100644
index 000000000..9afea2518
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/validation.go
@@ -0,0 +1,844 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Extends Schema and subSchema, implements the validation phase.
+//
+// created 28-02-2013
+
+package gojsonschema
+
+import (
+ "encoding/json"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) {
+
+ var err error
+
+ // load schema
+
+ schema, err := NewSchema(ls)
+ if err != nil {
+ return nil, err
+ }
+
+ // begin validation
+
+ return schema.Validate(ld)
+
+}
+
+func (v *Schema) Validate(l JSONLoader) (*Result, error) {
+
+ // load document
+
+ root, err := l.LoadJSON()
+ if err != nil {
+ return nil, err
+ }
+
+ // begin validation
+
+ result := &Result{}
+ context := newJsonContext(STRING_CONTEXT_ROOT, nil)
+ v.rootSchema.validateRecursive(v.rootSchema, root, result, context)
+
+ return result, nil
+
+}
+
+func (v *subSchema) subValidateWithContext(document interface{}, context *jsonContext) *Result {
+ result := &Result{}
+ v.validateRecursive(v, document, result, context)
+ return result
+}
+
+// Walker function to validate the json recursively against the subSchema
+func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {
+
+ if internalLogEnabled {
+ internalLog("validateRecursive %s", context.String())
+ internalLog(" %v", currentNode)
+ }
+
+ // Handle referenced schemas, returns directly when a $ref is found
+ if currentSubSchema.refSchema != nil {
+ v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context)
+ return
+ }
+
+ // Check for null value
+ if currentNode == nil {
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) {
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_NULL,
+ },
+ )
+ return
+ }
+
+ currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context)
+ v.validateCommon(currentSubSchema, currentNode, result, context)
+
+ } else { // Not a null value
+
+ if isJsonNumber(currentNode) {
+
+ value := currentNode.(json.Number)
+
+ _, isValidInt64, _ := checkJsonNumber(value)
+
+ validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isValidInt64 && currentSubSchema.types.Contains(TYPE_INTEGER))
+
+ if currentSubSchema.types.IsTyped() && !validType {
+
+ givenType := TYPE_INTEGER
+ if !isValidInt64 {
+ givenType = TYPE_NUMBER
+ }
+
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": givenType,
+ },
+ )
+ return
+ }
+
+ currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+ v.validateNumber(currentSubSchema, value, result, context)
+ v.validateCommon(currentSubSchema, value, result, context)
+ v.validateString(currentSubSchema, value, result, context)
+
+ } else {
+
+ rValue := reflect.ValueOf(currentNode)
+ rKind := rValue.Kind()
+
+ switch rKind {
+
+ // Slice => JSON array
+
+ case reflect.Slice:
+
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) {
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_ARRAY,
+ },
+ )
+ return
+ }
+
+ castCurrentNode := currentNode.([]interface{})
+
+ currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
+
+ v.validateArray(currentSubSchema, castCurrentNode, result, context)
+ v.validateCommon(currentSubSchema, castCurrentNode, result, context)
+
+ // Map => JSON object
+
+ case reflect.Map:
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) {
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_OBJECT,
+ },
+ )
+ return
+ }
+
+ castCurrentNode, ok := currentNode.(map[string]interface{})
+ if !ok {
+ castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{})
+ }
+
+ currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
+
+ v.validateObject(currentSubSchema, castCurrentNode, result, context)
+ v.validateCommon(currentSubSchema, castCurrentNode, result, context)
+
+ for _, pSchema := range currentSubSchema.propertiesChildren {
+ nextNode, ok := castCurrentNode[pSchema.property]
+ if ok {
+ subContext := newJsonContext(pSchema.property, context)
+ v.validateRecursive(pSchema, nextNode, result, subContext)
+ }
+ }
+
+ // Simple JSON values : string, number, boolean
+
+ case reflect.Bool:
+
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) {
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_BOOLEAN,
+ },
+ )
+ return
+ }
+
+ value := currentNode.(bool)
+
+ currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+ v.validateNumber(currentSubSchema, value, result, context)
+ v.validateCommon(currentSubSchema, value, result, context)
+ v.validateString(currentSubSchema, value, result, context)
+
+ case reflect.String:
+
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) {
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_STRING,
+ },
+ )
+ return
+ }
+
+ value := currentNode.(string)
+
+ currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+ v.validateNumber(currentSubSchema, value, result, context)
+ v.validateCommon(currentSubSchema, value, result, context)
+ v.validateString(currentSubSchema, value, result, context)
+
+ }
+
+ }
+
+ }
+
+ result.incrementScore()
+}
+
+// Different kinds of validation here: subSchema / common / array / object / string...
+func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {
+
+ if internalLogEnabled {
+ internalLog("validateSchema %s", context.String())
+ internalLog(" %v", currentNode)
+ }
+
+ if len(currentSubSchema.anyOf) > 0 {
+
+ validatedAnyOf := false
+ var bestValidationResult *Result
+
+ for _, anyOfSchema := range currentSubSchema.anyOf {
+ if !validatedAnyOf {
+ validationResult := anyOfSchema.subValidateWithContext(currentNode, context)
+ validatedAnyOf = validationResult.Valid()
+
+ if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
+ bestValidationResult = validationResult
+ }
+ }
+ }
+ if !validatedAnyOf {
+
+ result.addError(new(NumberAnyOfError), context, currentNode, ErrorDetails{})
+
+ if bestValidationResult != nil {
+ // add error messages of closest matching subSchema as
+ // that's probably the one the user was trying to match
+ result.mergeErrors(bestValidationResult)
+ }
+ }
+ }
+
+ if len(currentSubSchema.oneOf) > 0 {
+
+ nbValidated := 0
+ var bestValidationResult *Result
+
+ for _, oneOfSchema := range currentSubSchema.oneOf {
+ validationResult := oneOfSchema.subValidateWithContext(currentNode, context)
+ if validationResult.Valid() {
+ nbValidated++
+ } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
+ bestValidationResult = validationResult
+ }
+ }
+
+ if nbValidated != 1 {
+
+ result.addError(new(NumberOneOfError), context, currentNode, ErrorDetails{})
+
+ if nbValidated == 0 {
+ // add error messages of closest matching subSchema as
+ // that's probably the one the user was trying to match
+ result.mergeErrors(bestValidationResult)
+ }
+ }
+
+ }
+
+ if len(currentSubSchema.allOf) > 0 {
+ nbValidated := 0
+
+ for _, allOfSchema := range currentSubSchema.allOf {
+ validationResult := allOfSchema.subValidateWithContext(currentNode, context)
+ if validationResult.Valid() {
+ nbValidated++
+ }
+ result.mergeErrors(validationResult)
+ }
+
+ if nbValidated != len(currentSubSchema.allOf) {
+ result.addError(new(NumberAllOfError), context, currentNode, ErrorDetails{})
+ }
+ }
+
+ if currentSubSchema.not != nil {
+ validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context)
+ if validationResult.Valid() {
+ result.addError(new(NumberNotError), context, currentNode, ErrorDetails{})
+ }
+ }
+
+ if len(currentSubSchema.dependencies) > 0 {
+ if isKind(currentNode, reflect.Map) {
+ for elementKey := range currentNode.(map[string]interface{}) {
+ if dependency, ok := currentSubSchema.dependencies[elementKey]; ok {
+ switch dependency := dependency.(type) {
+
+ case []string:
+ for _, dependOnKey := range dependency {
+ if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved {
+ result.addError(
+ new(MissingDependencyError),
+ context,
+ currentNode,
+ ErrorDetails{"dependency": dependOnKey},
+ )
+ }
+ }
+
+ case *subSchema:
+ dependency.validateRecursive(dependency, currentNode, result, context)
+
+ }
+ }
+ }
+ }
+ }
+
+ result.incrementScore()
+}
+
+func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
+
+ if internalLogEnabled {
+ internalLog("validateCommon %s", context.String())
+ internalLog(" %v", value)
+ }
+
+ // enum:
+ if len(currentSubSchema.enum) > 0 {
+ has, err := currentSubSchema.ContainsEnum(value)
+ if err != nil {
+ result.addError(new(InternalError), context, value, ErrorDetails{"error": err})
+ }
+ if !has {
+ result.addError(
+ new(EnumError),
+ context,
+ value,
+ ErrorDetails{
+ "allowed": strings.Join(currentSubSchema.enum, ", "),
+ },
+ )
+ }
+ }
+
+ result.incrementScore()
+}
+
+func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *jsonContext) {
+
+ if internalLogEnabled {
+ internalLog("validateArray %s", context.String())
+ internalLog(" %v", value)
+ }
+
+ nbValues := len(value)
+
+ // "items" is a single schema: every element of the array must validate against it
+ if currentSubSchema.itemsChildrenIsSingleSchema {
+ for i := range value {
+ subContext := newJsonContext(strconv.Itoa(i), context)
+ validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext)
+ result.mergeErrors(validationResult)
+ }
+ } else {
+ if len(currentSubSchema.itemsChildren) > 0 {
+
+ nbItems := len(currentSubSchema.itemsChildren)
+
+ // while we have both schemas and values, check them against each other
+ for i := 0; i != nbItems && i != nbValues; i++ {
+ subContext := newJsonContext(strconv.Itoa(i), context)
+ validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext)
+ result.mergeErrors(validationResult)
+ }
+
+ if nbItems < nbValues {
+ // we have less schemas than elements in the instance array,
+ // but that might be ok if "additionalItems" is specified.
+
+ switch currentSubSchema.additionalItems.(type) {
+ case bool:
+ if !currentSubSchema.additionalItems.(bool) {
+ result.addError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{})
+ }
+ case *subSchema:
+ additionalItemSchema := currentSubSchema.additionalItems.(*subSchema)
+ for i := nbItems; i != nbValues; i++ {
+ subContext := newJsonContext(strconv.Itoa(i), context)
+ validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext)
+ result.mergeErrors(validationResult)
+ }
+ }
+ }
+ }
+ }
+
+ // minItems & maxItems
+ if currentSubSchema.minItems != nil {
+ if nbValues < int(*currentSubSchema.minItems) {
+ result.addError(
+ new(ArrayMinItemsError),
+ context,
+ value,
+ ErrorDetails{"min": *currentSubSchema.minItems},
+ )
+ }
+ }
+ if currentSubSchema.maxItems != nil {
+ if nbValues > int(*currentSubSchema.maxItems) {
+ result.addError(
+ new(ArrayMaxItemsError),
+ context,
+ value,
+ ErrorDetails{"max": *currentSubSchema.maxItems},
+ )
+ }
+ }
+
+ // uniqueItems:
+ if currentSubSchema.uniqueItems {
+ var stringifiedItems []string
+ for _, v := range value {
+ vString, err := marshalToJsonString(v)
+ if err != nil {
+ result.addError(new(InternalError), context, value, ErrorDetails{"err": err})
+ }
+ if isStringInSlice(stringifiedItems, *vString) {
+ result.addError(
+ new(ItemsMustBeUniqueError),
+ context,
+ value,
+ ErrorDetails{"type": TYPE_ARRAY},
+ )
+ }
+ stringifiedItems = append(stringifiedItems, *vString)
+ }
+ }
+
+ result.incrementScore()
+}
+
+func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *jsonContext) {
+
+ if internalLogEnabled {
+ internalLog("validateObject %s", context.String())
+ internalLog(" %v", value)
+ }
+
+ // minProperties & maxProperties:
+ if currentSubSchema.minProperties != nil {
+ if len(value) < int(*currentSubSchema.minProperties) {
+ result.addError(
+ new(ArrayMinPropertiesError),
+ context,
+ value,
+ ErrorDetails{"min": *currentSubSchema.minProperties},
+ )
+ }
+ }
+ if currentSubSchema.maxProperties != nil {
+ if len(value) > int(*currentSubSchema.maxProperties) {
+ result.addError(
+ new(ArrayMaxPropertiesError),
+ context,
+ value,
+ ErrorDetails{"max": *currentSubSchema.maxProperties},
+ )
+ }
+ }
+
+ // required:
+ for _, requiredProperty := range currentSubSchema.required {
+ _, ok := value[requiredProperty]
+ if ok {
+ result.incrementScore()
+ } else {
+ result.addError(
+ new(RequiredError),
+ context,
+ value,
+ ErrorDetails{"property": requiredProperty},
+ )
+ }
+ }
+
+ // additionalProperty & patternProperty:
+ if currentSubSchema.additionalProperties != nil {
+
+ switch currentSubSchema.additionalProperties.(type) {
+ case bool:
+
+ if !currentSubSchema.additionalProperties.(bool) {
+
+ for pk := range value {
+
+ found := false
+ for _, spValue := range currentSubSchema.propertiesChildren {
+ if pk == spValue.property {
+ found = true
+ }
+ }
+
+ pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
+
+ if found {
+
+ if pp_has && !pp_match {
+ result.addError(
+ new(AdditionalPropertyNotAllowedError),
+ context,
+ value[pk],
+ ErrorDetails{"property": pk},
+ )
+ }
+
+ } else {
+
+ if !pp_has || !pp_match {
+ result.addError(
+ new(AdditionalPropertyNotAllowedError),
+ context,
+ value[pk],
+ ErrorDetails{"property": pk},
+ )
+ }
+
+ }
+ }
+ }
+
+ case *subSchema:
+
+ additionalPropertiesSchema := currentSubSchema.additionalProperties.(*subSchema)
+ for pk := range value {
+
+ found := false
+ for _, spValue := range currentSubSchema.propertiesChildren {
+ if pk == spValue.property {
+ found = true
+ }
+ }
+
+ pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
+
+ if found {
+
+ if pp_has && !pp_match {
+ validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context)
+ result.mergeErrors(validationResult)
+ }
+
+ } else {
+
+ if !pp_has || !pp_match {
+ validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context)
+ result.mergeErrors(validationResult)
+ }
+
+ }
+
+ }
+ }
+ } else {
+
+ for pk := range value {
+
+ pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
+
+ if pp_has && !pp_match {
+
+ result.addError(
+ new(InvalidPropertyPatternError),
+ context,
+ value[pk],
+ ErrorDetails{
+ "property": pk,
+ "pattern": currentSubSchema.PatternPropertiesString(),
+ },
+ )
+ }
+
+ }
+ }
+
+ result.incrementScore()
+}
+
+func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *jsonContext) (has bool, matched bool) {
+
+ if internalLogEnabled {
+ internalLog("validatePatternProperty %s", context.String())
+ internalLog(" %s %v", key, value)
+ }
+
+ has = false
+
+ validatedkey := false
+
+ for pk, pv := range currentSubSchema.patternProperties {
+ if matches, _ := regexp.MatchString(pk, key); matches {
+ has = true
+ subContext := newJsonContext(key, context)
+ validationResult := pv.subValidateWithContext(value, subContext)
+ result.mergeErrors(validationResult)
+ if validationResult.Valid() {
+ validatedkey = true
+ }
+ }
+ }
+
+ if !validatedkey {
+ return has, false
+ }
+
+ result.incrementScore()
+
+ return has, true
+}
+
+func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
+
+ // Ignore JSON numbers
+ if isJsonNumber(value) {
+ return
+ }
+
+ // Ignore non strings
+ if !isKind(value, reflect.String) {
+ return
+ }
+
+ if internalLogEnabled {
+ internalLog("validateString %s", context.String())
+ internalLog(" %v", value)
+ }
+
+ stringValue := value.(string)
+
+ // minLength & maxLength:
+ if currentSubSchema.minLength != nil {
+ if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) {
+ result.addError(
+ new(StringLengthGTEError),
+ context,
+ value,
+ ErrorDetails{"min": *currentSubSchema.minLength},
+ )
+ }
+ }
+ if currentSubSchema.maxLength != nil {
+ if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) {
+ result.addError(
+ new(StringLengthLTEError),
+ context,
+ value,
+ ErrorDetails{"max": *currentSubSchema.maxLength},
+ )
+ }
+ }
+
+ // pattern:
+ if currentSubSchema.pattern != nil {
+ if !currentSubSchema.pattern.MatchString(stringValue) {
+ result.addError(
+ new(DoesNotMatchPatternError),
+ context,
+ value,
+ ErrorDetails{"pattern": currentSubSchema.pattern},
+ )
+
+ }
+ }
+
+ // format
+ if currentSubSchema.format != "" {
+ if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) {
+ result.addError(
+ new(DoesNotMatchFormatError),
+ context,
+ value,
+ ErrorDetails{"format": currentSubSchema.format},
+ )
+ }
+ }
+
+ result.incrementScore()
+}
+
+func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
+
+ // Ignore non numbers
+ if !isJsonNumber(value) {
+ return
+ }
+
+ if internalLogEnabled {
+ internalLog("validateNumber %s", context.String())
+ internalLog(" %v", value)
+ }
+
+ number := value.(json.Number)
+ float64Value, _ := number.Float64()
+
+ // multipleOf:
+ if currentSubSchema.multipleOf != nil {
+
+ if !isFloat64AnInteger(float64Value / *currentSubSchema.multipleOf) {
+ result.addError(
+ new(MultipleOfError),
+ context,
+ resultErrorFormatJsonNumber(number),
+ ErrorDetails{"multiple": *currentSubSchema.multipleOf},
+ )
+ }
+ }
+
+ //maximum & exclusiveMaximum:
+ if currentSubSchema.maximum != nil {
+ if currentSubSchema.exclusiveMaximum {
+ if float64Value >= *currentSubSchema.maximum {
+ result.addError(
+ new(NumberLTError),
+ context,
+ resultErrorFormatJsonNumber(number),
+ ErrorDetails{
+ "max": resultErrorFormatNumber(*currentSubSchema.maximum),
+ },
+ )
+ }
+ } else {
+ if float64Value > *currentSubSchema.maximum {
+ result.addError(
+ new(NumberLTEError),
+ context,
+ resultErrorFormatJsonNumber(number),
+ ErrorDetails{
+ "max": resultErrorFormatNumber(*currentSubSchema.maximum),
+ },
+ )
+ }
+ }
+ }
+
+ //minimum & exclusiveMinimum:
+ if currentSubSchema.minimum != nil {
+ if currentSubSchema.exclusiveMinimum {
+ if float64Value <= *currentSubSchema.minimum {
+ result.addError(
+ new(NumberGTError),
+ context,
+ resultErrorFormatJsonNumber(number),
+ ErrorDetails{
+ "min": resultErrorFormatNumber(*currentSubSchema.minimum),
+ },
+ )
+ }
+ } else {
+ if float64Value < *currentSubSchema.minimum {
+ result.addError(
+ new(NumberGTEError),
+ context,
+ resultErrorFormatJsonNumber(number),
+ ErrorDetails{
+ "min": resultErrorFormatNumber(*currentSubSchema.minimum),
+ },
+ )
+ }
+ }
+ }
+
+ // format
+ if currentSubSchema.format != "" {
+ if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) {
+ result.addError(
+ new(DoesNotMatchFormatError),
+ context,
+ value,
+ ErrorDetails{"format": currentSubSchema.format},
+ )
+ }
+ }
+
+ result.incrementScore()
+}
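The exported Validate added above is the package's one-call entry point. A hedged usage sketch: NewStringLoader and Result.Errors come from the upstream gojsonschema API rather than from hunks shown in this diff:

    package main

    import (
        "fmt"

        "github.com/xeipuuv/gojsonschema"
    )

    func main() {
        schema := gojsonschema.NewStringLoader(`{"type": "object", "required": ["name"]}`)
        document := gojsonschema.NewStringLoader(`{"age": 3}`)

        result, err := gojsonschema.Validate(schema, document)
        if err != nil {
            panic(err) // schema or document failed to load
        }
        if !result.Valid() {
            for _, e := range result.Errors() {
                fmt.Println(e) // e.g. "(root): name is required"
            }
        }
    }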
diff --git a/vendor/k8s.io/utils/README.md b/vendor/k8s.io/utils/README.md
deleted file mode 100644
index 2504649ef..000000000
--- a/vendor/k8s.io/utils/README.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Utils
-
-[![Build Status]](https://travis-ci.org/kubernetes/utils)
-
-A set of Go libraries that provide low-level,
-kubernetes-independent packages supplementing the [Go
-standard libs].
-
-## Purpose
-
-As Kubernetes grows and spins functionality out of its
-[core] and into cooperating repositories like
-[apiserver], [kubectl], [kubeadm], etc., the need
-arises for leaf repositories to house shared code and
-avoid cycles in repository relationships.
-
-This repository is intended to hold shared utilities
-with no Kubernetes dependence that may be of interest
-to any Go project. See these [instructions for moving]
-an existing package to this repository.
-
-
-## Criteria for adding code here
-
-- Used by multiple Kubernetes repositories.
-
-- Full unit test coverage.
-
-- Go tools compliant (`go get`, `go test`, etc.).
-
-- Complex enough to be worth vendoring, rather than copying.
-
-- Stable, or backward compatible, API.
-
-- _No dependence on any Kubernetes repository_.
-
-## Libraries
-
-- [Exec](/exec) provides an interface for `os/exec`. It makes it easier
- to mock and replace in tests, especially with
- the [FakeExec](exec/testing/fake_exec.go) struct.
-
-[Build Status]: https://travis-ci.org/kubernetes/utils.svg?branch=master
-[Go standard libs]: https://golang.org/pkg/#stdlib
-[api]: https://github.com/kubernetes/api
-[apiserver]: https://github.com/kubernetes/apiserver
-[core]: https://github.com/kubernetes/kubernetes
-[ingress]: https://github.com/kubernetes/ingress
-[kubeadm]: https://github.com/kubernetes/kubeadm
-[kubectl]: https://github.com/kubernetes/kubectl
-[instructions for moving]: ./HOWTOMOVE.md
diff --git a/vendor/k8s.io/utils/exec/doc.go b/vendor/k8s.io/utils/exec/doc.go
deleted file mode 100644
index cbb44bdb5..000000000
--- a/vendor/k8s.io/utils/exec/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package exec provides an injectable interface and implementations for running commands.
-package exec // import "k8s.io/utils/exec"
diff --git a/vendor/k8s.io/utils/exec/exec.go b/vendor/k8s.io/utils/exec/exec.go
deleted file mode 100644
index 3b23eceb1..000000000
--- a/vendor/k8s.io/utils/exec/exec.go
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package exec
-
-import (
- "io"
- osexec "os/exec"
- "syscall"
- "time"
-)
-
-// ErrExecutableNotFound is returned if the executable is not found.
-var ErrExecutableNotFound = osexec.ErrNotFound
-
-// Interface is an interface that presents a subset of the os/exec API. Use this
-// when you want to inject fakeable/mockable exec behavior.
-type Interface interface {
- // Command returns a Cmd instance which can be used to run a single command.
- // This follows the pattern of package os/exec.
- Command(cmd string, args ...string) Cmd
-
- // LookPath wraps os/exec.LookPath
- LookPath(file string) (string, error)
-}
-
-// Cmd is an interface that presents an API that is very similar to Cmd from os/exec.
-// As more functionality is needed, this can grow. Since Cmd is a struct, we will have
-// to replace fields with get/set method pairs.
-type Cmd interface {
- // Run runs the command to the completion.
- Run() error
- // CombinedOutput runs the command and returns its combined standard output
- // and standard error. This follows the pattern of package os/exec.
- CombinedOutput() ([]byte, error)
- // Output runs the command and returns standard output, but not standard err
- Output() ([]byte, error)
- SetDir(dir string)
- SetStdin(in io.Reader)
- SetStdout(out io.Writer)
- SetStderr(out io.Writer)
- // Stops the command by sending SIGTERM. It is not guaranteed the
- // process will stop before this function returns. If the process is not
- // responding, an internal timer function will send a SIGKILL to force
- // terminate after 10 seconds.
- Stop()
-}
-
-// ExitError is an interface that presents an API similar to os.ProcessState, which is
-// what ExitError from os/exec is. This is designed to make testing a bit easier and
-// probably loses some of the cross-platform properties of the underlying library.
-type ExitError interface {
- String() string
- Error() string
- Exited() bool
- ExitStatus() int
-}
-
-// Implements Interface in terms of really exec()ing.
-type executor struct{}
-
-// New returns a new Interface which will os/exec to run commands.
-func New() Interface {
- return &executor{}
-}
-
-// Command is part of the Interface interface.
-func (executor *executor) Command(cmd string, args ...string) Cmd {
- return (*cmdWrapper)(osexec.Command(cmd, args...))
-}
-
-// LookPath is part of the Interface interface
-func (executor *executor) LookPath(file string) (string, error) {
- return osexec.LookPath(file)
-}
-
-// Wraps exec.Cmd so we can capture errors.
-type cmdWrapper osexec.Cmd
-
-var _ Cmd = &cmdWrapper{}
-
-func (cmd *cmdWrapper) SetDir(dir string) {
- cmd.Dir = dir
-}
-
-func (cmd *cmdWrapper) SetStdin(in io.Reader) {
- cmd.Stdin = in
-}
-
-func (cmd *cmdWrapper) SetStdout(out io.Writer) {
- cmd.Stdout = out
-}
-
-func (cmd *cmdWrapper) SetStderr(out io.Writer) {
- cmd.Stderr = out
-}
-
-// Run is part of the Cmd interface.
-func (cmd *cmdWrapper) Run() error {
- return (*osexec.Cmd)(cmd).Run()
-}
-
-// CombinedOutput is part of the Cmd interface.
-func (cmd *cmdWrapper) CombinedOutput() ([]byte, error) {
- out, err := (*osexec.Cmd)(cmd).CombinedOutput()
- if err != nil {
- return out, handleError(err)
- }
- return out, nil
-}
-
-func (cmd *cmdWrapper) Output() ([]byte, error) {
- out, err := (*osexec.Cmd)(cmd).Output()
- if err != nil {
- return out, handleError(err)
- }
- return out, nil
-}
-
-// Stop is part of the Cmd interface.
-func (cmd *cmdWrapper) Stop() {
- c := (*osexec.Cmd)(cmd)
- if c.ProcessState.Exited() {
- return
- }
- c.Process.Signal(syscall.SIGTERM)
- time.AfterFunc(10*time.Second, func() {
- if c.ProcessState.Exited() {
- return
- }
- c.Process.Signal(syscall.SIGKILL)
- })
-}
-
-func handleError(err error) error {
- if ee, ok := err.(*osexec.ExitError); ok {
- // Force a compile fail if exitErrorWrapper can't convert to ExitError.
- var x ExitError = &ExitErrorWrapper{ee}
- return x
- }
- if ee, ok := err.(*osexec.Error); ok {
- if ee.Err == osexec.ErrNotFound {
- return ErrExecutableNotFound
- }
- }
- return err
-}
-
-// ExitErrorWrapper is an implementation of ExitError in terms of os/exec ExitError.
-// Note: standard exec.ExitError is type *os.ProcessState, which already implements Exited().
-type ExitErrorWrapper struct {
- *osexec.ExitError
-}
-
-var _ ExitError = ExitErrorWrapper{}
-
-// ExitStatus is part of the ExitError interface.
-func (eew ExitErrorWrapper) ExitStatus() int {
- ws, ok := eew.Sys().(syscall.WaitStatus)
- if !ok {
- panic("can't call ExitStatus() on a non-WaitStatus exitErrorWrapper")
- }
- return ws.ExitStatus()
-}
-
-// CodeExitError is an implementation of ExitError consisting of an error object
-// and an exit code (the upper bits of os.exec.ExitStatus).
-type CodeExitError struct {
- Err error
- Code int
-}
-
-var _ ExitError = CodeExitError{}
-
-func (e CodeExitError) Error() string {
- return e.Err.Error()
-}
-
-func (e CodeExitError) String() string {
- return e.Err.Error()
-}
-
-func (e CodeExitError) Exited() bool {
- return true
-}
-
-func (e CodeExitError) ExitStatus() int {
- return e.Code
-}
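The exec package removed above existed, per its own doc comments, to make process execution injectable and fakeable in tests. The pattern it implemented, reduced to a sketch with illustrative names (Runner and fakeRunner are stand-ins, not types from the package):

    package main

    import "fmt"

    // Runner is a trimmed-down version of the injectable Interface the
    // deleted exec.go described.
    type Runner interface {
        CombinedOutput(cmd string, args ...string) ([]byte, error)
    }

    type fakeRunner struct{ out string }

    func (f fakeRunner) CombinedOutput(cmd string, args ...string) ([]byte, error) {
        return []byte(f.out), nil // canned output, no process spawned
    }

    func version(r Runner) (string, error) {
        out, err := r.CombinedOutput("runc", "--version")
        return string(out), err
    }

    func main() {
        v, _ := version(fakeRunner{out: "runc version 1.0.0"})
        fmt.Println(v) // tests never shell out
    }

Production code would pass an implementation backed by os/exec; tests pass the fake, which is the whole point of keeping the interface between caller and process.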