author     Matthew Heon <matthew.heon@pm.me>  2019-05-21 15:54:26 -0400
committer  GitHub <noreply@github.com>        2019-05-21 15:54:26 -0400
commit     cec56b0075eeec6a8cdfba3138dec32fdcd36b47 (patch)
tree       6b129e2b8f58e0a46b8d25cd4459bb6e3c9436f7 /vendor/github.com
parent     e6277b1a2dedf8a4d80a270ddac0be4c22ed3e6d (diff)
parent     1d505f6875d27dccafb777853a51045f82888f00 (diff)
download   podman-cec56b0075eeec6a8cdfba3138dec32fdcd36b47.tar.gz
           podman-cec56b0075eeec6a8cdfba3138dec32fdcd36b47.tar.bz2
           podman-cec56b0075eeec6a8cdfba3138dec32fdcd36b47.zip
Merge pull request #3168 from rhatdan/vendor
Update vendor of buildah and containers/images
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/boltdb/bolt/freelist.go  252
-rw-r--r--  vendor/github.com/containers/buildah/add.go  78
-rw-r--r--  vendor/github.com/containers/buildah/buildah.go  4
-rw-r--r--  vendor/github.com/containers/buildah/chroot/run.go  12
-rw-r--r--  vendor/github.com/containers/buildah/commit.go  29
-rw-r--r--  vendor/github.com/containers/buildah/imagebuildah/build.go  121
-rw-r--r--  vendor/github.com/containers/buildah/new.go  1
-rw-r--r--  vendor/github.com/containers/buildah/pkg/cli/common.go  15
-rw-r--r--  vendor/github.com/containers/buildah/pkg/overlay/overlay.go  46
-rw-r--r--  vendor/github.com/containers/buildah/pkg/parse/parse.go  20
-rw-r--r--  vendor/github.com/containers/buildah/pull.go  22
-rw-r--r--  vendor/github.com/containers/buildah/run_linux.go  41
-rw-r--r--  vendor/github.com/containers/buildah/util/util.go  2
-rw-r--r--  vendor/github.com/containers/buildah/vendor.conf  7
-rw-r--r--  vendor/github.com/containers/image/docker/docker_client.go  71
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image.go  2
-rw-r--r--  vendor/github.com/containers/image/docker/docker_image_src.go  98
-rw-r--r--  vendor/github.com/containers/image/docker/docker_transport.go  12
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go  2
-rw-r--r--  vendor/github.com/containers/image/pkg/blobinfocache/default.go  12
-rw-r--r--  vendor/github.com/containers/image/pkg/docker/config/config.go  15
-rw-r--r--  vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go  92
-rw-r--r--  vendor/github.com/containers/image/storage/storage_transport.go  3
-rw-r--r--  vendor/github.com/containers/image/transports/alltransports/alltransports.go  12
-rw-r--r--  vendor/github.com/containers/image/types/types.go  1
-rw-r--r--  vendor/github.com/containers/image/vendor.conf  5
-rw-r--r--  vendor/github.com/containers/image/version/version.go  6
-rw-r--r--  vendor/github.com/etcd-io/bbolt/LICENSE (renamed from vendor/github.com/boltdb/bolt/LICENSE)  0
-rw-r--r--  vendor/github.com/etcd-io/bbolt/README.md (renamed from vendor/github.com/boltdb/bolt/README.md)  182
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_386.go (renamed from vendor/github.com/boltdb/bolt/bolt_386.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_amd64.go (renamed from vendor/github.com/boltdb/bolt/bolt_amd64.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_arm.go (renamed from vendor/github.com/boltdb/bolt/bolt_arm.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_arm64.go (renamed from vendor/github.com/boltdb/bolt/bolt_arm64.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_linux.go (renamed from vendor/github.com/boltdb/bolt/bolt_linux.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_mips64x.go  12
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_mipsx.go  12
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_openbsd.go (renamed from vendor/github.com/boltdb/bolt/bolt_openbsd.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_ppc.go (renamed from vendor/github.com/boltdb/bolt/bolt_ppc.go)  5
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_ppc64.go (renamed from vendor/github.com/boltdb/bolt/bolt_ppc64.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go (renamed from vendor/github.com/boltdb/bolt/bolt_ppc64le.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_s390x.go (renamed from vendor/github.com/boltdb/bolt/bolt_s390x.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_unix.go (renamed from vendor/github.com/boltdb/bolt/bolt_unix.go)  42
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go (renamed from vendor/github.com/boltdb/bolt/bolt_unix_solaris.go)  44
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bolt_windows.go (renamed from vendor/github.com/boltdb/bolt/bolt_windows.go)  57
-rw-r--r--  vendor/github.com/etcd-io/bbolt/boltsync_unix.go (renamed from vendor/github.com/boltdb/bolt/boltsync_unix.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/bucket.go (renamed from vendor/github.com/boltdb/bolt/bucket.go)  16
-rw-r--r--  vendor/github.com/etcd-io/bbolt/cursor.go (renamed from vendor/github.com/boltdb/bolt/cursor.go)  10
-rw-r--r--  vendor/github.com/etcd-io/bbolt/db.go (renamed from vendor/github.com/boltdb/bolt/db.go)  251
-rw-r--r--  vendor/github.com/etcd-io/bbolt/doc.go (renamed from vendor/github.com/boltdb/bolt/doc.go)  4
-rw-r--r--  vendor/github.com/etcd-io/bbolt/errors.go (renamed from vendor/github.com/boltdb/bolt/errors.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/freelist.go  370
-rw-r--r--  vendor/github.com/etcd-io/bbolt/freelist_hmap.go  178
-rw-r--r--  vendor/github.com/etcd-io/bbolt/node.go (renamed from vendor/github.com/boltdb/bolt/node.go)  4
-rw-r--r--  vendor/github.com/etcd-io/bbolt/page.go (renamed from vendor/github.com/boltdb/bolt/page.go)  2
-rw-r--r--  vendor/github.com/etcd-io/bbolt/tx.go (renamed from vendor/github.com/boltdb/bolt/tx.go)  85
55 files changed, 1540 insertions, 737 deletions
diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go
deleted file mode 100644
index aba48f58c..000000000
--- a/vendor/github.com/boltdb/bolt/freelist.go
+++ /dev/null
@@ -1,252 +0,0 @@
-package bolt
-
-import (
- "fmt"
- "sort"
- "unsafe"
-)
-
-// freelist represents a list of all pages that are available for allocation.
-// It also tracks pages that have been freed but are still in use by open transactions.
-type freelist struct {
- ids []pgid // all free and available free page ids.
- pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
- cache map[pgid]bool // fast lookup of all free and pending page ids.
-}
-
-// newFreelist returns an empty, initialized freelist.
-func newFreelist() *freelist {
- return &freelist{
- pending: make(map[txid][]pgid),
- cache: make(map[pgid]bool),
- }
-}
-
-// size returns the size of the page after serialization.
-func (f *freelist) size() int {
- n := f.count()
- if n >= 0xFFFF {
- // The first element will be used to store the count. See freelist.write.
- n++
- }
- return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
-}
-
-// count returns count of pages on the freelist
-func (f *freelist) count() int {
- return f.free_count() + f.pending_count()
-}
-
-// free_count returns count of free pages
-func (f *freelist) free_count() int {
- return len(f.ids)
-}
-
-// pending_count returns count of pending pages
-func (f *freelist) pending_count() int {
- var count int
- for _, list := range f.pending {
- count += len(list)
- }
- return count
-}
-
-// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
-// f.count returns the minimum length required for dst.
-func (f *freelist) copyall(dst []pgid) {
- m := make(pgids, 0, f.pending_count())
- for _, list := range f.pending {
- m = append(m, list...)
- }
- sort.Sort(m)
- mergepgids(dst, f.ids, m)
-}
-
-// allocate returns the starting page id of a contiguous list of pages of a given size.
-// If a contiguous block cannot be found then 0 is returned.
-func (f *freelist) allocate(n int) pgid {
- if len(f.ids) == 0 {
- return 0
- }
-
- var initial, previd pgid
- for i, id := range f.ids {
- if id <= 1 {
- panic(fmt.Sprintf("invalid page allocation: %d", id))
- }
-
- // Reset initial page if this is not contiguous.
- if previd == 0 || id-previd != 1 {
- initial = id
- }
-
- // If we found a contiguous block then remove it and return it.
- if (id-initial)+1 == pgid(n) {
- // If we're allocating off the beginning then take the fast path
- // and just adjust the existing slice. This will use extra memory
- // temporarily but the append() in free() will realloc the slice
- // as is necessary.
- if (i + 1) == n {
- f.ids = f.ids[i+1:]
- } else {
- copy(f.ids[i-n+1:], f.ids[i+1:])
- f.ids = f.ids[:len(f.ids)-n]
- }
-
- // Remove from the free cache.
- for i := pgid(0); i < pgid(n); i++ {
- delete(f.cache, initial+i)
- }
-
- return initial
- }
-
- previd = id
- }
- return 0
-}
-
-// free releases a page and its overflow for a given transaction id.
-// If the page is already free then a panic will occur.
-func (f *freelist) free(txid txid, p *page) {
- if p.id <= 1 {
- panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
- }
-
- // Free page and all its overflow pages.
- var ids = f.pending[txid]
- for id := p.id; id <= p.id+pgid(p.overflow); id++ {
- // Verify that page is not already free.
- if f.cache[id] {
- panic(fmt.Sprintf("page %d already freed", id))
- }
-
- // Add to the freelist and cache.
- ids = append(ids, id)
- f.cache[id] = true
- }
- f.pending[txid] = ids
-}
-
-// release moves all page ids for a transaction id (or older) to the freelist.
-func (f *freelist) release(txid txid) {
- m := make(pgids, 0)
- for tid, ids := range f.pending {
- if tid <= txid {
- // Move transaction's pending pages to the available freelist.
- // Don't remove from the cache since the page is still free.
- m = append(m, ids...)
- delete(f.pending, tid)
- }
- }
- sort.Sort(m)
- f.ids = pgids(f.ids).merge(m)
-}
-
-// rollback removes the pages from a given pending tx.
-func (f *freelist) rollback(txid txid) {
- // Remove page ids from cache.
- for _, id := range f.pending[txid] {
- delete(f.cache, id)
- }
-
- // Remove pages from pending list.
- delete(f.pending, txid)
-}
-
-// freed returns whether a given page is in the free list.
-func (f *freelist) freed(pgid pgid) bool {
- return f.cache[pgid]
-}
-
-// read initializes the freelist from a freelist page.
-func (f *freelist) read(p *page) {
- // If the page.count is at the max uint16 value (64k) then it's considered
- // an overflow and the size of the freelist is stored as the first element.
- idx, count := 0, int(p.count)
- if count == 0xFFFF {
- idx = 1
- count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
- }
-
- // Copy the list of page ids from the freelist.
- if count == 0 {
- f.ids = nil
- } else {
- ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
- f.ids = make([]pgid, len(ids))
- copy(f.ids, ids)
-
- // Make sure they're sorted.
- sort.Sort(pgids(f.ids))
- }
-
- // Rebuild the page cache.
- f.reindex()
-}
-
-// write writes the page ids onto a freelist page. All free and pending ids are
-// saved to disk since in the event of a program crash, all pending ids will
-// become free.
-func (f *freelist) write(p *page) error {
- // Combine the old free pgids and pgids waiting on an open transaction.
-
- // Update the header flag.
- p.flags |= freelistPageFlag
-
- // The page.count can only hold up to 64k elements so if we overflow that
- // number then we handle it by putting the size in the first element.
- lenids := f.count()
- if lenids == 0 {
- p.count = uint16(lenids)
- } else if lenids < 0xFFFF {
- p.count = uint16(lenids)
- f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
- } else {
- p.count = 0xFFFF
- ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
- f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
- }
-
- return nil
-}
-
-// reload reads the freelist from a page and filters out pending items.
-func (f *freelist) reload(p *page) {
- f.read(p)
-
- // Build a cache of only pending pages.
- pcache := make(map[pgid]bool)
- for _, pendingIDs := range f.pending {
- for _, pendingID := range pendingIDs {
- pcache[pendingID] = true
- }
- }
-
- // Check each page in the freelist and build a new available freelist
- // with any pages not in the pending lists.
- var a []pgid
- for _, id := range f.ids {
- if !pcache[id] {
- a = append(a, id)
- }
- }
- f.ids = a
-
- // Once the available list is rebuilt then rebuild the free cache so that
- // it includes the available and pending free pages.
- f.reindex()
-}
-
-// reindex rebuilds the free cache based on available and pending free lists.
-func (f *freelist) reindex() {
- f.cache = make(map[pgid]bool, len(f.ids))
- for _, id := range f.ids {
- f.cache[id] = true
- }
- for _, pendingIDs := range f.pending {
- for _, pendingID := range pendingIDs {
- f.cache[pendingID] = true
- }
- }
-}
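
The whole boltdb/bolt freelist is deleted here because the vendored copy moves to etcd-io/bbolt, which keeps an array-based freelist and adds a hashmap variant (freelist_hmap.go in the diffstat above). For orientation, a minimal self-contained Go sketch of the contiguous-run scan the deleted allocate performed, using plain uint64 ids in place of bolt's pgid type:

```go
// Illustrative sketch only: plain uint64 ids stand in for bolt's pgid type.
package main

import "fmt"

// allocateRun finds the first run of n consecutive ids in the sorted slice,
// splices the run out, and returns its starting id (0 means "no run found";
// bolt reserves pages 0 and 1 for metadata, so 0 is never a valid result).
func allocateRun(ids []uint64, n int) (uint64, []uint64) {
	var initial, prev uint64
	for i, id := range ids {
		// Restart the candidate run whenever the sequence breaks.
		if prev == 0 || id-prev != 1 {
			initial = id
		}
		// A full run of n pages ends at index i: splice it out.
		if int(id-initial)+1 == n {
			return initial, append(ids[:i-n+1], ids[i+1:]...)
		}
		prev = id
	}
	return 0, ids
}

func main() {
	start, rest := allocateRun([]uint64{3, 4, 7, 8, 9, 12}, 3)
	fmt.Println(start, rest) // 7 [3 4 12]
}
```
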
diff --git a/vendor/github.com/containers/buildah/add.go b/vendor/github.com/containers/buildah/add.go
index d67a481f1..589e090a8 100644
--- a/vendor/github.com/containers/buildah/add.go
+++ b/vendor/github.com/containers/buildah/add.go
@@ -15,6 +15,7 @@ import (
"github.com/containers/buildah/util"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
+ "github.com/containers/storage/pkg/system"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -88,7 +89,7 @@ func addURL(destination, srcurl string, owner idtools.IDPair, hasher io.Writer)
// filesystem, optionally extracting contents of local files that look like
// non-empty archives.
func (b *Builder) Add(destination string, extract bool, options AddAndCopyOptions, source ...string) error {
- excludes := DockerIgnoreHelper(options.Excludes, options.ContextDir)
+ excludes := dockerIgnoreHelper(options.Excludes, options.ContextDir)
mountPoint, err := b.Mount(b.MountLabel)
if err != nil {
return err
@@ -177,16 +178,16 @@ func (b *Builder) user(mountPoint string, userspec string) (specs.User, error) {
return u, err
}
-// DockerIgnore struct keeps info from .dockerignore
-type DockerIgnore struct {
+// dockerIgnore struct keeps info from .dockerignore
+type dockerIgnore struct {
ExcludePath string
IsExcluded bool
}
-// DockerIgnoreHelper returns the lines from .dockerignore file without the comments
+// dockerIgnoreHelper returns the lines from .dockerignore file without the comments
// and reverses the order
-func DockerIgnoreHelper(lines []string, contextDir string) []DockerIgnore {
- var excludes []DockerIgnore
+func dockerIgnoreHelper(lines []string, contextDir string) []dockerIgnore {
+ var excludes []dockerIgnore
 // the last match of a file in the .dockerignore determines whether it is included or excluded
// reverse the order
for i := len(lines) - 1; i >= 0; i-- {
@@ -200,15 +201,15 @@ func DockerIgnoreHelper(lines []string, contextDir string) []DockerIgnore {
exclude = strings.TrimPrefix(exclude, "!")
excludeFlag = false
}
- excludes = append(excludes, DockerIgnore{ExcludePath: filepath.Join(contextDir, exclude), IsExcluded: excludeFlag})
+ excludes = append(excludes, dockerIgnore{ExcludePath: filepath.Join(contextDir, exclude), IsExcluded: excludeFlag})
}
if len(excludes) != 0 {
- excludes = append(excludes, DockerIgnore{ExcludePath: filepath.Join(contextDir, ".dockerignore"), IsExcluded: true})
+ excludes = append(excludes, dockerIgnore{ExcludePath: filepath.Join(contextDir, ".dockerignore"), IsExcluded: true})
}
return excludes
}
-func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
+func addHelper(excludes []dockerIgnore, extract bool, dest string, destfi os.FileInfo, hostOwner idtools.IDPair, options AddAndCopyOptions, copyFileWithTar, copyWithTar, untarPath func(src, dest string) error, source ...string) error {
dirsInDockerignore, err := getDirsInDockerignore(options.ContextDir, excludes)
if err != nil {
return errors.Wrapf(err, "error checking directories in .dockerignore")
@@ -270,9 +271,6 @@ func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.Fil
if err != nil {
return err
}
- if info.IsDir() {
- return nil
- }
for _, exclude := range excludes {
match, err := filepath.Match(filepath.Clean(exclude.ExcludePath), filepath.Clean(path))
if err != nil {
@@ -292,7 +290,25 @@ func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.Fil
break
}
// combine the filename with the dest directory
- fpath := strings.TrimPrefix(path, esrc)
+ fpath, err := filepath.Rel(esrc, path)
+ if err != nil {
+ return errors.Wrapf(err, "error converting %s to a path relative to %s", path, esrc)
+ }
+ mtime := info.ModTime()
+ atime := mtime
+ times := []syscall.Timespec{
+ {Sec: atime.Unix(), Nsec: atime.UnixNano() % 1000000000},
+ {Sec: mtime.Unix(), Nsec: mtime.UnixNano() % 1000000000},
+ }
+ if info.IsDir() {
+ return addHelperDirectory(esrc, path, filepath.Join(dest, fpath), info, hostOwner, times)
+ }
+ if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+ return addHelperSymlink(path, filepath.Join(dest, fpath), info, hostOwner, times)
+ }
+ if !info.Mode().IsRegular() {
+ return errors.Errorf("error copying %q to %q: source is not a regular file; file mode is %s", path, dest, info.Mode())
+ }
if err = copyFileWithTar(path, filepath.Join(dest, fpath)); err != nil {
return errors.Wrapf(err, "error copying %q to %q", path, dest)
}
@@ -343,7 +359,41 @@ func addHelper(excludes []DockerIgnore, extract bool, dest string, destfi os.Fil
return nil
}
-func getDirsInDockerignore(srcAbsPath string, excludes []DockerIgnore) (map[string]string, error) {
+func addHelperDirectory(esrc, path, dest string, info os.FileInfo, hostOwner idtools.IDPair, times []syscall.Timespec) error {
+ if err := idtools.MkdirAllAndChownNew(dest, info.Mode().Perm(), hostOwner); err != nil {
+ // discard only EEXIST on the top directory, which would have been created earlier in the caller
+ if !os.IsExist(err) || path != esrc {
+ return errors.Errorf("error creating directory %q", dest)
+ }
+ }
+ if err := idtools.SafeLchown(dest, hostOwner.UID, hostOwner.GID); err != nil {
+ return errors.Wrapf(err, "error setting owner of directory %q to %d:%d", dest, hostOwner.UID, hostOwner.GID)
+ }
+ if err := system.LUtimesNano(dest, times); err != nil {
+ return errors.Wrapf(err, "error setting dates on directory %q", dest)
+ }
+ return nil
+}
+
+func addHelperSymlink(src, dest string, info os.FileInfo, hostOwner idtools.IDPair, times []syscall.Timespec) error {
+ linkContents, err := os.Readlink(src)
+ if err != nil {
+ return errors.Wrapf(err, "error reading contents of symbolic link at %q", src)
+ }
+ if err = os.Symlink(linkContents, dest); err != nil {
+ return errors.Wrapf(err, "error creating symbolic link to %q at %q", linkContents, dest)
+ }
+ if err = idtools.SafeLchown(dest, hostOwner.UID, hostOwner.GID); err != nil {
+ return errors.Wrapf(err, "error setting owner of symbolic link %q to %d:%d", dest, hostOwner.UID, hostOwner.GID)
+ }
+ if err = system.LUtimesNano(dest, times); err != nil {
+ return errors.Wrapf(err, "error setting dates on symbolic link %q", dest)
+ }
+ logrus.Debugf("Symlink(%s, %s)", linkContents, dest)
+ return nil
+}
+
+func getDirsInDockerignore(srcAbsPath string, excludes []dockerIgnore) (map[string]string, error) {
visitedDir := make(map[string]string)
if len(excludes) == 0 {
return visitedDir, nil
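
The new addHelperDirectory/addHelperSymlink paths preserve ownership and timestamps instead of silently skipping non-regular files. A small sketch (assuming linux/amd64, where syscall.Timespec holds int64 seconds and nanoseconds) of the time.Time-to-Timespec conversion the hunk performs before calling system.LUtimesNano:

```go
// Minimal sketch, assuming linux/amd64. buildah feeds this (atime, mtime)
// pair to system.LUtimesNano to preserve timestamps on the copied entry.
package main

import (
	"fmt"
	"syscall"
	"time"
)

func timespecs(mtime time.Time) []syscall.Timespec {
	atime := mtime // the hunk reuses the mtime for the atime
	return []syscall.Timespec{
		{Sec: atime.Unix(), Nsec: atime.UnixNano() % 1000000000},
		{Sec: mtime.Unix(), Nsec: mtime.UnixNano() % 1000000000},
	}
}

func main() {
	ts := timespecs(time.Unix(1558466066, 123456789))
	fmt.Println(ts[1].Sec, ts[1].Nsec) // 1558466066 123456789
}
```
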
diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go
index 16f1a64fe..33b7afccd 100644
--- a/vendor/github.com/containers/buildah/buildah.go
+++ b/vendor/github.com/containers/buildah/buildah.go
@@ -26,7 +26,7 @@ const (
Package = "buildah"
// Version for the Package. Bump version in contrib/rpm/buildah.spec
// too.
- Version = "1.8.2"
+ Version = "1.9.0-dev"
// The value we use to identify what type of information, currently a
// serialized Builder structure, we are using as per-container state.
// This should only be changed when we make incompatible changes to
@@ -191,6 +191,8 @@ type Builder struct {
TopLayer string
// Format for the build Image
Format string
+ // TempVolumes are temporary mount points created during container runs
+ TempVolumes map[string]bool
}
// BuilderInfo are used as objects to display container information
diff --git a/vendor/github.com/containers/buildah/chroot/run.go b/vendor/github.com/containers/buildah/chroot/run.go
index 1c3ac65f3..c65926c8e 100644
--- a/vendor/github.com/containers/buildah/chroot/run.go
+++ b/vendor/github.com/containers/buildah/chroot/run.go
@@ -1071,7 +1071,7 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
}
}
// Skip anything that isn't a bind or tmpfs mount.
- if m.Type != "bind" && m.Type != "tmpfs" {
+ if m.Type != "bind" && m.Type != "tmpfs" && m.Type != "overlay" {
logrus.Debugf("skipping mount of type %q on %q", m.Type, m.Destination)
continue
}
@@ -1083,10 +1083,12 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
if err != nil {
return undoBinds, errors.Wrapf(err, "error examining %q for mounting in mount namespace", m.Source)
}
+ case "overlay":
+ fallthrough
case "tmpfs":
srcinfo, err = os.Stat("/")
if err != nil {
- return undoBinds, errors.Wrapf(err, "error examining / to use as a template for a tmpfs")
+ return undoBinds, errors.Wrapf(err, "error examining / to use as a template for a %s", m.Type)
}
}
target := filepath.Join(spec.Root.Path, m.Destination)
@@ -1145,6 +1147,12 @@ func setupChrootBindMounts(spec *specs.Spec, bundlePath string) (undoBinds func(
return undoBinds, errors.Wrapf(err, "error mounting tmpfs to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ","))
}
logrus.Debugf("mounted a tmpfs to %q", target)
+ case "overlay":
+ // Mount an overlay.
+ if err := mount.Mount(m.Source, target, m.Type, strings.Join(append(m.Options, "private"), ",")); err != nil {
+ return undoBinds, errors.Wrapf(err, "error mounting overlay to %q in mount namespace (%q, %q)", m.Destination, target, strings.Join(m.Options, ","))
+ }
+ logrus.Debugf("mounted a overlay to %q", target)
}
if err = unix.Statfs(target, &fs); err != nil {
return undoBinds, errors.Wrapf(err, "error checking if directory %q was bound read-only", target)
diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go
index 05d1550b3..7a373ea5e 100644
--- a/vendor/github.com/containers/buildah/commit.go
+++ b/vendor/github.com/containers/buildah/commit.go
@@ -106,6 +106,22 @@ type PushOptions struct {
Quiet bool
}
+var (
+ // commitPolicy bypasses any signing requirements when committing containers to images
+ commitPolicy = &signature.Policy{
+ Default: []signature.PolicyRequirement{signature.NewPRReject()},
+ Transports: map[string]signature.PolicyTransportScopes{
+ is.Transport.Name(): {
+ "": []signature.PolicyRequirement{
+ signature.NewPRInsecureAcceptAnything(),
+ },
+ },
+ },
+ }
+ // pushPolicy bypasses any signing requirements when pushing (copying) images from local storage
+ pushPolicy = commitPolicy
+)
+
// Commit writes the contents of the container, along with its updated
// configuration, to a new image in the specified location, and if we know how,
// add any additional tags that were specified. Returns the ID of the new image
@@ -141,11 +157,7 @@ func (b *Builder) Commit(ctx context.Context, dest types.ImageReference, options
return "", nil, "", errors.Errorf("commit access to registry for %q is blocked by configuration", transports.ImageName(dest))
}
- policy, err := signature.DefaultPolicy(systemContext)
- if err != nil {
- return imgID, nil, "", errors.Wrapf(err, "error obtaining default signature policy")
- }
- policyContext, err := signature.NewPolicyContext(policy)
+ policyContext, err := signature.NewPolicyContext(commitPolicy)
if err != nil {
return imgID, nil, "", errors.Wrapf(err, "error creating new signature policy context")
}
@@ -280,11 +292,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
return nil, "", errors.Errorf("push access to registry for %q is blocked by configuration", transports.ImageName(dest))
}
- policy, err := signature.DefaultPolicy(systemContext)
- if err != nil {
- return nil, "", errors.Wrapf(err, "error obtaining default signature policy")
- }
- policyContext, err := signature.NewPolicyContext(policy)
+ policyContext, err := signature.NewPolicyContext(pushPolicy)
if err != nil {
return nil, "", errors.Wrapf(err, "error creating new signature policy context")
}
@@ -330,6 +338,5 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options
logrus.Warnf("error generating canonical reference with name %q and digest %s: %v", name, manifestDigest.String(), err)
}
}
- fmt.Printf("Successfully pushed %s@%s\n", dest.StringWithinTransport(), manifestDigest.String())
return ref, manifestDigest, nil
}
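
The hunks above replace per-call signature.DefaultPolicy lookups with fixed package-level policies: reject everything by default, accept anything coming from local containers-storage. A sketch of building the same policy context with containers/image's signature API, mirroring the code in the hunk:

```go
// Sketch of the "reject by default, trust local storage" policy the hunk
// hard-codes, using the vendored signature and storage-transport APIs.
package main

import (
	"fmt"

	"github.com/containers/image/signature"
	is "github.com/containers/image/storage"
)

func main() {
	policy := &signature.Policy{
		Default: []signature.PolicyRequirement{signature.NewPRReject()},
		Transports: map[string]signature.PolicyTransportScopes{
			is.Transport.Name(): {
				"": []signature.PolicyRequirement{
					signature.NewPRInsecureAcceptAnything(),
				},
			},
		},
	}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer policyContext.Destroy()
	fmt.Println("commit/push can now copy images without a signing requirement")
}
```
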
diff --git a/vendor/github.com/containers/buildah/imagebuildah/build.go b/vendor/github.com/containers/buildah/imagebuildah/build.go
index 85848e297..b8b9db0f3 100644
--- a/vendor/github.com/containers/buildah/imagebuildah/build.go
+++ b/vendor/github.com/containers/buildah/imagebuildah/build.go
@@ -27,6 +27,7 @@ import (
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/archive"
+ "github.com/cyphar/filepath-securejoin"
docker "github.com/fsouza/go-dockerclient"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runtime-spec/specs-go"
@@ -480,22 +481,22 @@ func (s *StageExecutor) volumeCacheRestore() error {
}
// Copy copies data into the working tree. The "Download" field is how
-// imagebuilder tells us the instruction was "ADD" and not "COPY".
+// imagebuilder tells us the instruction was "ADD" and not "COPY"
func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) error {
for _, copy := range copies {
- // If the file exists, check to see if it's a symlink.
- // If it is a symlink, convert to it's target otherwise
- // the symlink will be overwritten.
- fileDest, _ := os.Lstat(filepath.Join(s.mountPoint, copy.Dest))
- if fileDest != nil {
- if fileDest.Mode()&os.ModeSymlink != 0 {
- if symLink, err := resolveSymlink(s.mountPoint, copy.Dest); err == nil {
- copy.Dest = symLink
- } else {
- return errors.Wrapf(err, "error reading symbolic link to %q", copy.Dest)
- }
- }
+ // Check the file and see if part of it is a symlink.
+ // Convert it to the target if so. To be ultrasafe
+ // do the same for the mountpoint.
+ secureMountPoint, err := securejoin.SecureJoin("", s.mountPoint)
+ finalPath, err := securejoin.SecureJoin(secureMountPoint, copy.Dest)
+ if err != nil {
+ return errors.Wrapf(err, "error resolving symlinks for copy destination %s", copy.Dest)
+ }
+ if !strings.HasPrefix(finalPath, secureMountPoint) {
+ return errors.Wrapf(err, "error resolving copy destination %s", copy.Dest)
}
+ copy.Dest = strings.TrimPrefix(finalPath, secureMountPoint)
+
if copy.Download {
logrus.Debugf("ADD %#v, %#v", excludes, copy)
} else {
@@ -1218,22 +1219,32 @@ func (s *StageExecutor) layerExists(ctx context.Context, currNode *parser.Node,
if err != nil {
return "", errors.Wrap(err, "error getting image list from store")
}
- for _, image := range images {
- layer, err := s.executor.store.Layer(image.TopLayer)
+ var baseHistory []v1.History
+ if s.builder.FromImageID != "" {
+ baseHistory, err = s.executor.getImageHistory(ctx, s.builder.FromImageID)
if err != nil {
- return "", errors.Wrapf(err, "error getting top layer info")
+ return "", errors.Wrapf(err, "error getting history of base image %q", s.builder.FromImageID)
+ }
+ }
+ for _, image := range images {
+ var imageTopLayer *storage.Layer
+ if image.TopLayer != "" {
+ imageTopLayer, err = s.executor.store.Layer(image.TopLayer)
+ if err != nil {
+ return "", errors.Wrapf(err, "error getting top layer info")
+ }
}
// If the parent of the top layer of an image is equal to the last entry in b.topLayers
// it means that this image is potentially a cached intermediate image from a previous
// build. Next we double check that the history of this image is equivalent to the previous
// lines in the Dockerfile up till the point we are at in the build.
- if layer.Parent == s.executor.topLayers[len(s.executor.topLayers)-1] || layer.ID == s.executor.topLayers[len(s.executor.topLayers)-1] {
+ if imageTopLayer == nil || imageTopLayer.Parent == s.executor.topLayers[len(s.executor.topLayers)-1] || imageTopLayer.ID == s.executor.topLayers[len(s.executor.topLayers)-1] {
history, err := s.executor.getImageHistory(ctx, image.ID)
if err != nil {
return "", errors.Wrapf(err, "error getting history of %q", image.ID)
}
// children + currNode is the point of the Dockerfile we are currently at.
- if s.executor.historyMatches(append(children, currNode), history) {
+ if s.executor.historyMatches(baseHistory, currNode, history) {
// This checks if the files copied during build have been changed if the node is
// a COPY or ADD command.
filesMatch, err := s.copiedFilesMatch(currNode, history[len(history)-1].Created)
@@ -1282,32 +1293,60 @@ func (b *Executor) getCreatedBy(node *parser.Node) string {
return "/bin/sh -c #(nop) " + node.Original
}
-// historyMatches returns true if the history of the image matches the lines
-// in the Dockerfile till the point of build we are at.
+// historyMatches returns true if a candidate history matches the history of our
+// base image (if we have one), plus the current instruction.
// Used to verify whether a cache of the intermediate image exists and whether
// to run the build again.
-func (b *Executor) historyMatches(children []*parser.Node, history []v1.History) bool {
- i := len(history) - 1
- for j := len(children) - 1; j >= 0; j-- {
- instruction := children[j].Original
- if children[j].Value == "run" {
- instruction = instruction[4:]
- buildArgs := b.getBuildArgs()
- // If a previous image was built with some build-args but the new build process doesn't have any build-args
- // specified, so compare the lengths of the old instruction with the current one
- // 11 is the length of "/bin/sh -c " that is used to run the run commands
- if buildArgs == "" && len(history[i].CreatedBy) > len(instruction)+11 {
- return false
- }
- // There are build-args, so check if anything with the build-args has changed
- if buildArgs != "" && !strings.Contains(history[i].CreatedBy, buildArgs) {
- return false
- }
+func (b *Executor) historyMatches(baseHistory []v1.History, child *parser.Node, history []v1.History) bool {
+ if len(baseHistory) >= len(history) {
+ return false
+ }
+ if len(history)-len(baseHistory) != 1 {
+ return false
+ }
+ for i := range baseHistory {
+ if baseHistory[i].CreatedBy != history[i].CreatedBy {
+ return false
+ }
+ if baseHistory[i].Comment != history[i].Comment {
+ return false
+ }
+ if baseHistory[i].Author != history[i].Author {
+ return false
+ }
+ if baseHistory[i].EmptyLayer != history[i].EmptyLayer {
+ return false
+ }
+ if baseHistory[i].Created != nil && history[i].Created == nil {
+ return false
+ }
+ if baseHistory[i].Created == nil && history[i].Created != nil {
+ return false
}
- if !strings.Contains(history[i].CreatedBy, instruction) {
+ if baseHistory[i].Created != nil && history[i].Created != nil && *baseHistory[i].Created != *history[i].Created {
+ return false
+ }
+ }
+ instruction := child.Original
+ switch strings.ToUpper(child.Value) {
+ case "RUN":
+ instruction = instruction[4:]
+ buildArgs := b.getBuildArgs()
+ // If a previous image was built with some build-args but the new build process doesn't have any build-args
+ // specified, the command might be expanded differently, so compare the lengths of the old instruction with
+ // the current one. 11 is the length of "/bin/sh -c " that is used to run the run commands.
+ if buildArgs == "" && len(history[len(baseHistory)].CreatedBy) > len(instruction)+11 {
+ return false
+ }
+ // There are build-args, so check if anything with the build-args has changed
+ if buildArgs != "" && !strings.Contains(history[len(baseHistory)].CreatedBy, buildArgs) {
+ return false
+ }
+ fallthrough
+ default:
+ if !strings.Contains(history[len(baseHistory)].CreatedBy, instruction) {
return false
}
- i--
}
return true
}
@@ -1816,8 +1855,8 @@ func (b *Executor) deleteSuccessfulIntermediateCtrs() error {
}
func (s *StageExecutor) EnsureContainerPath(path string) error {
- targetPath := filepath.Join(s.mountPoint, path)
- _, err := os.Lstat(targetPath)
+ targetPath, err := securejoin.SecureJoin(s.mountPoint, path)
+ _, err = os.Lstat(targetPath)
if err != nil && os.IsNotExist(err) {
err = os.MkdirAll(targetPath, 0755)
}
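
Both Copy and EnsureContainerPath now resolve destination paths through github.com/cyphar/filepath-securejoin, so a symlink inside the rootfs can no longer redirect writes outside the mount point. A small usage sketch; the mount-point path is a hypothetical stand-in for s.mountPoint:

```go
// Usage sketch: SecureJoin resolves symlinks under the root and clamps ".."
// components so the result can never escape it.
package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	mountPoint := "/var/lib/containers/storage/overlay/abcd/merged" // hypothetical
	dest, err := securejoin.SecureJoin(mountPoint, "../../../../etc/passwd")
	if err != nil {
		panic(err)
	}
	// Still inside the rootfs: <mountPoint>/etc/passwd
	fmt.Println(dest)
}
```
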
diff --git a/vendor/github.com/containers/buildah/new.go b/vendor/github.com/containers/buildah/new.go
index 29546caba..ecd1666bf 100644
--- a/vendor/github.com/containers/buildah/new.go
+++ b/vendor/github.com/containers/buildah/new.go
@@ -351,6 +351,7 @@ func newBuilder(ctx context.Context, store storage.Store, options BuilderOptions
TopLayer: topLayer,
Args: options.Args,
Format: options.Format,
+ TempVolumes: map[string]bool{},
}
if options.Mount {
diff --git a/vendor/github.com/containers/buildah/pkg/cli/common.go b/vendor/github.com/containers/buildah/pkg/cli/common.go
index e7a571db6..23bb696fc 100644
--- a/vendor/github.com/containers/buildah/pkg/cli/common.go
+++ b/vendor/github.com/containers/buildah/pkg/cli/common.go
@@ -7,6 +7,7 @@ package cli
import (
"fmt"
"os"
+ "path/filepath"
"strings"
"github.com/containers/buildah"
@@ -137,7 +138,7 @@ func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])")
- fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json")
+ fs.StringVar(&flags.Authfile, "authfile", GetDefaultAuthFile(), "path of the authentication file.")
fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
fs.StringVar(&flags.CacheFrom, "cache-from", "", "Images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.")
fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
@@ -246,3 +247,15 @@ func VerifyFlagsArgsOrder(args []string) error {
}
return nil
}
+
+func GetDefaultAuthFile() string {
+ authfile := os.Getenv("REGISTRY_AUTH_FILE")
+ if authfile != "" {
+ return authfile
+ }
+ runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
+ if runtimeDir != "" {
+ return filepath.Join(runtimeDir, "containers/auth.json")
+ }
+ return ""
+}
diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
new file mode 100644
index 000000000..31f0c2cec
--- /dev/null
+++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go
@@ -0,0 +1,46 @@
+package overlay
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containers/storage"
+ "github.com/containers/storage/pkg/idtools"
+ "github.com/opencontainers/runtime-spec/specs-go"
+ "github.com/pkg/errors"
+)
+
+// MountTemp creates a subdir of the contentDir based on the source directory
+// from the source system. It then mounts the source directory onto the
+// generated mount point and returns the mount point to the caller.
+func MountTemp(store storage.Store, containerId, source, dest string, rootUID, rootGID int) (specs.Mount, string, error) {
+ mount := specs.Mount{}
+
+ contentDir, err := store.ContainerDirectory(containerId)
+ if err != nil {
+ return mount, "", err
+ }
+ upperDir := filepath.Join(contentDir, "upper")
+ workDir := filepath.Join(contentDir, "work")
+ if err := idtools.MkdirAllAs(upperDir, 0700, rootUID, rootGID); err != nil {
+ return mount, "", errors.Wrapf(err, "failed to create the overlay %s directory", upperDir)
+ }
+ if err := idtools.MkdirAllAs(workDir, 0700, rootUID, rootGID); err != nil {
+ return mount, "", errors.Wrapf(err, "failed to create the overlay %s directory", workDir)
+ }
+
+ mount.Source = "overlay"
+ mount.Destination = dest
+ mount.Type = "overlay"
+ mount.Options = strings.Split(fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", source, upperDir, workDir), ",")
+
+ return mount, contentDir, nil
+}
+
+// RemoveTemp removes temporary mountpoint and all content from its parent
+// directory
+func RemoveTemp(contentDir string) error {
+ return os.RemoveAll(contentDir)
+}
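
A sketch of how a caller might drive these helpers, with signatures taken from the file above; the store, container ID, uid/gid values, and run callback are assumptions standing in for a real Builder's state:

```go
// Sketch of a caller pairing MountTemp with RemoveTemp around a run.
package overlaydemo

import (
	"github.com/containers/buildah/pkg/overlay"
	"github.com/containers/storage"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func runWithTempOverlay(store storage.Store, containerID string, spec *specs.Spec, run func(*specs.Spec) error) error {
	// Writes to /src land in a throwaway upper dir under the container's
	// storage directory; the host-side /src/on/host stays untouched.
	mount, contentDir, err := overlay.MountTemp(store, containerID, "/src/on/host", "/src", 0, 0)
	if err != nil {
		return err
	}
	// Discard the upper/work directories once the run finishes.
	defer overlay.RemoveTemp(contentDir)
	spec.Mounts = append(spec.Mounts, mount)
	return run(spec)
}
```
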
diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go
index 070f4d04e..6c58f1194 100644
--- a/vendor/github.com/containers/buildah/pkg/parse/parse.go
+++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go
@@ -14,6 +14,7 @@ import (
"unicode"
"github.com/containers/buildah"
+ "github.com/containers/buildah/pkg/unshare"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/idtools"
"github.com/docker/go-units"
@@ -155,16 +156,16 @@ func ParseVolume(volume string) (specs.Mount, error) {
if len(arr) < 2 {
return mount, errors.Errorf("incorrect volume format %q, should be host-dir:ctr-dir[:option]", volume)
}
- if err := validateVolumeHostDir(arr[0]); err != nil {
+ if err := ValidateVolumeHostDir(arr[0]); err != nil {
return mount, err
}
- if err := validateVolumeCtrDir(arr[1]); err != nil {
+ if err := ValidateVolumeCtrDir(arr[1]); err != nil {
return mount, err
}
mountOptions := ""
if len(arr) > 2 {
mountOptions = arr[2]
- if err := validateVolumeOpts(arr[2]); err != nil {
+ if err := ValidateVolumeOpts(arr[2]); err != nil {
return mount, err
}
}
@@ -189,7 +190,7 @@ func ParseVolumes(volumes []string) error {
return nil
}
-func validateVolumeHostDir(hostDir string) error {
+func ValidateVolumeHostDir(hostDir string) error {
if !filepath.IsAbs(hostDir) {
return errors.Errorf("invalid host path, must be an absolute path %q", hostDir)
}
@@ -199,14 +200,14 @@ func validateVolumeHostDir(hostDir string) error {
return nil
}
-func validateVolumeCtrDir(ctrDir string) error {
+func ValidateVolumeCtrDir(ctrDir string) error {
if !filepath.IsAbs(ctrDir) {
return errors.Errorf("invalid container path, must be an absolute path %q", ctrDir)
}
return nil
}
-func validateVolumeOpts(option string) error {
+func ValidateVolumeOpts(option string) error {
var foundRootPropagation, foundRWRO, foundLabelChange int
options := strings.Split(option, ",")
for _, opt := range options {
@@ -216,9 +217,12 @@ func validateVolumeOpts(option string) error {
return errors.Errorf("invalid options %q, can only specify 1 'rw' or 'ro' option", option)
}
foundRWRO++
- case "z", "Z":
+ case "z", "Z", "O":
+ if opt == "O" && unshare.IsRootless() {
+ return errors.Errorf("invalid options %q, overlay mounts not supported in rootless mode", option)
+ }
if foundLabelChange > 1 {
- return errors.Errorf("invalid options %q, can only specify 1 'z' or 'Z' option", option)
+ return errors.Errorf("invalid options %q, can only specify 1 'z', 'Z', or 'O' option", option)
}
foundLabelChange++
case "private", "rprivate", "shared", "rshared", "slave", "rslave", "unbindable", "runbindable":
diff --git a/vendor/github.com/containers/buildah/pull.go b/vendor/github.com/containers/buildah/pull.go
index 5eec1b3dd..66e573fa7 100644
--- a/vendor/github.com/containers/buildah/pull.go
+++ b/vendor/github.com/containers/buildah/pull.go
@@ -2,7 +2,6 @@ package buildah
import (
"context"
- "fmt"
"io"
"strings"
@@ -152,8 +151,9 @@ func localImageNameForReference(ctx context.Context, store storage.Store, srcRef
return name, nil
}
-// Pull copies the contents of the image from somewhere else to local storage.
-func Pull(ctx context.Context, imageName string, options PullOptions) error {
+// Pull copies the contents of the image from somewhere else to local storage. Returns the
+// ID of the local image or an error.
+func Pull(ctx context.Context, imageName string, options PullOptions) (imageID string, err error) {
systemContext := getSystemContext(options.Store, options.SystemContext, options.SignaturePolicyPath)
boptions := BuilderOptions{
@@ -166,23 +166,23 @@ func Pull(ctx context.Context, imageName string, options PullOptions) error {
storageRef, transport, img, err := resolveImage(ctx, systemContext, options.Store, boptions)
if err != nil {
- return err
+ return "", err
}
var errs *multierror.Error
if options.AllTags {
if transport != util.DefaultTransport {
- return errors.New("Non-docker transport is not supported, for --all-tags pulling")
+ return "", errors.New("Non-docker transport is not supported, for --all-tags pulling")
}
repo := reference.TrimNamed(storageRef.DockerReference())
dockerRef, err := docker.NewReference(reference.TagNameOnly(storageRef.DockerReference()))
if err != nil {
- return errors.Wrapf(err, "internal error creating docker.Transport reference for %s", storageRef.DockerReference().String())
+ return "", errors.Wrapf(err, "internal error creating docker.Transport reference for %s", storageRef.DockerReference().String())
}
tags, err := docker.GetRepositoryTags(ctx, systemContext, dockerRef)
if err != nil {
- return errors.Wrapf(err, "error getting repository tags")
+ return "", errors.Wrapf(err, "error getting repository tags")
}
for _, tag := range tags {
tagged, err := reference.WithTag(repo, tag)
@@ -192,7 +192,7 @@ func Pull(ctx context.Context, imageName string, options PullOptions) error {
}
taggedRef, err := docker.NewReference(tagged)
if err != nil {
- return errors.Wrapf(err, "internal error creating docker.Transport reference for %s", tagged.String())
+ return "", errors.Wrapf(err, "internal error creating docker.Transport reference for %s", tagged.String())
}
if options.ReportWriter != nil {
options.ReportWriter.Write([]byte("Pulling " + tagged.String() + "\n"))
@@ -207,13 +207,13 @@ func Pull(ctx context.Context, imageName string, options PullOptions) error {
errs = multierror.Append(errs, err)
continue
}
- fmt.Printf("%s\n", taggedImg.ID)
+ imageID = taggedImg.ID
}
} else {
- fmt.Printf("%s\n", img.ID)
+ imageID = img.ID
}
- return errs.ErrorOrNil()
+ return imageID, errs.ErrorOrNil()
}
func pullImage(ctx context.Context, store storage.Store, srcRef types.ImageReference, options PullOptions, sc *types.SystemContext) (types.ImageReference, error) {
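
Pull's new contract hands the image ID back to the caller instead of printing it. A usage sketch; Store and ReportWriter are the PullOptions fields visible in the hunk above, everything else is left at its zero value:

```go
// Usage sketch for the new Pull signature.
package pulldemo

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/buildah"
	"github.com/containers/storage"
)

func pullOne(ctx context.Context, store storage.Store) error {
	imageID, err := buildah.Pull(ctx, "docker.io/library/alpine:latest", buildah.PullOptions{
		Store:        store,
		ReportWriter: os.Stderr,
	})
	if err != nil {
		return err
	}
	fmt.Println("pulled", imageID) // the caller now decides what to print
	return nil
}
```
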
diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go
index 1acf655eb..81ce2b944 100644
--- a/vendor/github.com/containers/buildah/run_linux.go
+++ b/vendor/github.com/containers/buildah/run_linux.go
@@ -23,6 +23,7 @@ import (
"github.com/containernetworking/cni/libcni"
"github.com/containers/buildah/bind"
"github.com/containers/buildah/chroot"
+ "github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/secrets"
"github.com/containers/buildah/pkg/unshare"
"github.com/containers/buildah/util"
@@ -184,6 +185,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
if err != nil {
return errors.Wrapf(err, "error resolving mountpoints for container %q", b.ContainerID)
}
+ defer b.cleanupTempVolumes()
if options.CNIConfigDir == "" {
options.CNIConfigDir = b.CNIConfigDir
@@ -214,7 +216,7 @@ func (b *Builder) Run(command []string, options RunOptions) error {
if options.NoPivot {
moreCreateArgs = append(moreCreateArgs, "--no-pivot")
}
- if err := setupRootlessSpecChanges(spec, path, rootUID, rootGID); err != nil {
+ if err := setupRootlessSpecChanges(spec, path, rootUID, rootGID, b.CommonBuildOpts.ShmSize); err != nil {
return err
}
err = b.runUsingRuntimeSubproc(isolation, options, configureNetwork, configureNetworks, moreCreateArgs, spec, mountPoint, path, Package+"-"+filepath.Base(path))
@@ -438,7 +440,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st
}
// Get the list of explicitly-specified volume mounts.
- volumes, err := runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts)
+ volumes, err := b.runSetupVolumeMounts(spec.Linux.MountLabel, volumeMounts, optionMounts, int(rootUID), int(rootGID))
if err != nil {
return err
}
@@ -1537,11 +1539,21 @@ func addRlimits(ulimit []string, g *generate.Generator) error {
return nil
}
-func runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount) ([]specs.Mount, error) {
- var mounts []specs.Mount
+func (b *Builder) cleanupTempVolumes() {
+ for tempVolume, val := range b.TempVolumes {
+ if val {
+ if err := overlay.RemoveTemp(tempVolume); err != nil {
+ logrus.Errorf(err.Error())
+ }
+ b.TempVolumes[tempVolume] = false
+ }
+ }
+}
+
+func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts []specs.Mount, rootUID, rootGID int) (mounts []specs.Mount, Err error) {
parseMount := func(host, container string, options []string) (specs.Mount, error) {
- var foundrw, foundro, foundz, foundZ bool
+ var foundrw, foundro, foundz, foundZ, foundO bool
var rootProp string
for _, opt := range options {
switch opt {
@@ -1553,6 +1565,8 @@ func runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts
foundz = true
case "Z":
foundZ = true
+ case "O":
+ foundO = true
case "private", "rprivate", "slave", "rslave", "shared", "rshared":
rootProp = opt
}
@@ -1570,6 +1584,14 @@ func runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts
return specs.Mount{}, errors.Wrapf(err, "relabeling %q failed", host)
}
}
+ if foundO {
+ overlayMount, contentDir, err := overlay.MountTemp(b.store, b.ContainerID, host, container, rootUID, rootGID)
+ if err == nil {
+
+ b.TempVolumes[contentDir] = true
+ }
+ return overlayMount, err
+ }
if rootProp == "" {
options = append(options, "private")
}
@@ -1577,13 +1599,14 @@ func runSetupVolumeMounts(mountLabel string, volumeMounts []string, optionMounts
Destination: container,
Type: "bind",
Source: host,
- Options: options,
+ Options: append(options, "rbind"),
}, nil
}
+
// Bind mount volumes specified for this particular Run() invocation
for _, i := range optionMounts {
logrus.Debugf("setting up mounted volume at %q", i.Destination)
- mount, err := parseMount(i.Source, i.Destination, append(i.Options, "rbind"))
+ mount, err := parseMount(i.Source, i.Destination, i.Options)
if err != nil {
return nil, err
}
@@ -1809,7 +1832,7 @@ func (b *Builder) configureEnvironment(g *generate.Generator, options RunOptions
}
}
-func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, rootUID, rootGID uint32) error {
+func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, rootUID, rootGID uint32, shmSize string) error {
spec.Hostname = ""
spec.Process.User.AdditionalGids = nil
spec.Linux.Resources = nil
@@ -1843,7 +1866,7 @@ func setupRootlessSpecChanges(spec *specs.Spec, bundleDir string, rootUID, rootG
Source: "shm",
Destination: "/dev/shm",
Type: "tmpfs",
- Options: []string{"private", "nodev", "noexec", "nosuid", "mode=1777", "size=65536k"},
+ Options: []string{"private", "nodev", "noexec", "nosuid", "mode=1777", fmt.Sprintf("size=%s", shmSize)},
},
{
Source: "/proc",
diff --git a/vendor/github.com/containers/buildah/util/util.go b/vendor/github.com/containers/buildah/util/util.go
index 629d9748c..30afe8313 100644
--- a/vendor/github.com/containers/buildah/util/util.go
+++ b/vendor/github.com/containers/buildah/util/util.go
@@ -112,7 +112,7 @@ func ResolveName(name string, firstRegistry string, sc *types.SystemContext, sto
}
for _, registry := range searchRegistries {
if !registry.Blocked {
- registries = append(registries, registry.URL)
+ registries = append(registries, registry.Location)
}
}
searchRegistriesAreEmpty := len(registries) == 0
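
The URL-to-Location rename tracks the sysregistriesv2 V2 schema, where each registry entry (and each mirror) is an Endpoint carrying a Location. A sketch of the corresponding lookup; it assumes the vendored revision exposes sysregistriesv2.GetRegistries with this shape, with Location promoted from the embedded Endpoint as seen elsewhere in this diff:

```go
// Sketch of listing unblocked registries under the V2 schema.
package main

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
)

func main() {
	registries, err := sysregistriesv2.GetRegistries(nil)
	if err != nil {
		panic(err)
	}
	for _, reg := range registries {
		if !reg.Blocked {
			fmt.Println(reg.Location) // formerly reg.URL
		}
	}
}
```
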
diff --git a/vendor/github.com/containers/buildah/vendor.conf b/vendor/github.com/containers/buildah/vendor.conf
index 051f98ab8..0c982626a 100644
--- a/vendor/github.com/containers/buildah/vendor.conf
+++ b/vendor/github.com/containers/buildah/vendor.conf
@@ -3,12 +3,12 @@ github.com/blang/semver v3.5.0
github.com/BurntSushi/toml v0.2.0
github.com/containerd/continuity 004b46473808b3e7a4a3049c20e4376c91eb966d
github.com/containernetworking/cni v0.7.0-rc2
-github.com/containers/image f52cf78ebfa1916da406f8b6210d8f7764ec1185
+github.com/containers/image 9467ac9cfd92c545aa389f22f27e552de053c0f2
+github.com/cyphar/filepath-securejoin v0.2.1
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
-github.com/boltdb/bolt v1.3.1
-github.com/containers/storage v1.12.6
+github.com/containers/storage v1.12.7
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
github.com/docker/docker 54dddadc7d5d89fe0be88f76979f6f6ab0dede83
github.com/docker/docker-credential-helpers v0.6.1
@@ -66,3 +66,4 @@ github.com/onsi/gomega v1.4.3
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3
github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
+github.com/etcd-io/bbolt v1.3.2
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
index 40f11c62a..81a43e0fa 100644
--- a/vendor/github.com/containers/image/docker/docker_client.go
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -23,7 +23,7 @@ import (
"github.com/containers/image/types"
"github.com/docker/distribution/registry/client"
"github.com/docker/go-connections/tlsconfig"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -81,28 +81,30 @@ type bearerToken struct {
// dockerClient is configuration for dealing with a single Docker registry.
type dockerClient struct {
// The following members are set by newDockerClient and do not change afterwards.
- sys *types.SystemContext
- registry string
- client *http.Client
- insecureSkipTLSVerify bool
+ sys *types.SystemContext
+ registry string
+ // tlsClientConfig is setup by newDockerClient and will be used and updated
+ // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
+ tlsClientConfig *tls.Config
// The following members are not set by newDockerClient and must be set by callers if needed.
username string
password string
signatureBase signatureStorageBase
scope authScope
+
// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
- scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
+ client *http.Client
+ scheme string
challenges []challenge
supportsSignatures bool
// Private state for setupRequestAuth (key: string, value: bearerToken)
tokenCache sync.Map
- // detectPropertiesError caches the initial error.
- detectPropertiesError error
- // detectPropertiesOnce is used to execuute detectProperties() at most once in in makeRequest().
- detectPropertiesOnce sync.Once
+ // Private state for detectProperties:
+ detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once.
+ detectPropertiesError error // detectPropertiesError caches the initial error.
}
type authScope struct {
@@ -229,8 +231,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
if registry == dockerHostname {
registry = dockerRegistry
}
- tr := tlsclientconfig.NewTransport()
- tr.TLSClientConfig = serverDefault()
+ tlsClientConfig := serverDefault()
// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
@@ -241,38 +242,31 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
if err != nil {
return nil, err
}
- if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil {
+ if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
return nil, err
}
// Check if TLS verification shall be skipped (default=false) which can
- // either be specified in the sysregistriesv2 configuration or via the
- // SystemContext, whereas the SystemContext is prioritized.
+ // be specified in the sysregistriesv2 configuration.
skipVerify := false
- if sys != nil && sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
- // Only use the SystemContext if the actual value is defined.
- skipVerify = sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
- } else {
- reg, err := sysregistriesv2.FindRegistry(sys, reference)
- if err != nil {
- return nil, errors.Wrapf(err, "error loading registries")
- }
- if reg != nil {
- skipVerify = reg.Insecure
- }
+ reg, err := sysregistriesv2.FindRegistry(sys, reference)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error loading registries")
+ }
+ if reg != nil {
+ skipVerify = reg.Insecure
}
- tr.TLSClientConfig.InsecureSkipVerify = skipVerify
+ tlsClientConfig.InsecureSkipVerify = skipVerify
return &dockerClient{
- sys: sys,
- registry: registry,
- client: &http.Client{Transport: tr},
- insecureSkipTLSVerify: skipVerify,
+ sys: sys,
+ registry: registry,
+ tlsClientConfig: tlsClientConfig,
}, nil
}
// CheckAuth validates the credentials by attempting to log into the registry
-// returns an error if an error occcured while making the http request or the status code received was 401
+// returns an error if an error occurred while making the http request or the status code received was 401
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
client, err := newDockerClient(sys, registry, registry)
if err != nil {
@@ -557,9 +551,14 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
// detectPropertiesHelper performs the work of detectProperties which executes
// it at most once.
func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
- if c.scheme != "" {
- return nil
+ // We overwrite the TLS clients `InsecureSkipVerify` only if explicitly
+ // specified by the system context
+ if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
+ c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
}
+ tr := tlsclientconfig.NewTransport()
+ tr.TLSClientConfig = c.tlsClientConfig
+ c.client = &http.Client{Transport: tr}
ping := func(scheme string) error {
url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
@@ -579,7 +578,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
return nil
}
err := ping("https")
- if err != nil && c.insecureSkipTLSVerify {
+ if err != nil && c.tlsClientConfig.InsecureSkipVerify {
err = ping("http")
}
if err != nil {
@@ -603,7 +602,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
return true
}
isV1 := pingV1("https")
- if !isV1 && c.insecureSkipTLSVerify {
+ if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
isV1 = pingV1("http")
}
if isV1 {
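
The client construction moves out of newDockerClient and into detectProperties, guarded by sync.Once, so callers can still flip InsecureSkipVerify before the first request materializes the transport. A self-contained sketch of that lazy-once pattern (not the real dockerClient type):

```go
// Self-contained sketch: the TLS config stays mutable until the first
// request builds the transport, exactly once.
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"sync"
)

type lazyClient struct {
	tlsClientConfig *tls.Config
	once            sync.Once
	client          *http.Client
}

func (c *lazyClient) httpClient() *http.Client {
	c.once.Do(func() {
		// Materialize the transport exactly once, after any caller edits
		// to tlsClientConfig (e.g. InsecureSkipVerify) have landed.
		c.client = &http.Client{Transport: &http.Transport{TLSClientConfig: c.tlsClientConfig}}
	})
	return c.client
}

func main() {
	c := &lazyClient{tlsClientConfig: &tls.Config{}}
	c.tlsClientConfig.InsecureSkipVerify = true // still honored: nothing built yet
	fmt.Println(c.httpClient() != nil)
}
```
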
diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go
index 530c7513e..744667f54 100644
--- a/vendor/github.com/containers/image/docker/docker_image.go
+++ b/vendor/github.com/containers/image/docker/docker_image.go
@@ -25,7 +25,7 @@ type Image struct {
// a client to the registry hosting the given image.
// The caller must call .Close() on the returned Image.
func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
- s, err := newImageSource(sys, ref)
+ s, err := newImageSource(ctx, sys, ref)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
index 8367792bf..c8fdb407c 100644
--- a/vendor/github.com/containers/image/docker/docker_image_src.go
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -13,9 +13,10 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/sysregistriesv2"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/client"
- "github.com/opencontainers/go-digest"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -28,17 +29,94 @@ type dockerImageSource struct {
cachedManifestMIMEType string // Only valid if cachedManifest != nil
}
-// newImageSource creates a new ImageSource for the specified image reference.
-// The caller must call .Close() on the returned ImageSource.
-func newImageSource(sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
- c, err := newDockerClientFromRef(sys, ref, false, "pull")
+// newImageSource creates a new `ImageSource` for the specified image reference
+// `ref`.
+//
+// The following steps will be done during the instance creation:
+//
+// - Look up the registry within the configured location in
+// `sys.SystemRegistriesConfPath`. If there is no configured registry available,
+// we fall back to the provided docker reference `ref`.
+//
+// - References which contain a configured prefix will be automatically rewritten
+// to the correct target reference. For example, with the configuration
+// `prefix = "example.com/foo"` and `location = "example.com"`, an image pulled
+// via the ref `example.com/foo/image` will effectively be pulled from
+// `example.com/image`.
+//
+// - If the rewrite succeeds, the rewritten reference will be used as the `dockerRef`
+// in the client. If the rewrite fails, the function immediately returns an error.
+//
+// - Each mirror will be used (in the configured order) to test the
+// availability of the image manifest on the remote location. For example,
+// if the manifest is not reachable due to connectivity issues, then the next
+// mirror will be tested instead. If no mirror is configured or contains the
+// target manifest, then the initial `ref` will be tested as fallback. The
+// creation of the new `dockerImageSource` only succeeds if a remote
+// location with the available manifest was found.
+//
+// A cleanup call to `.Close()` is needed when the caller is done using the
+// returned `ImageSource`.
+func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
+ registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
if err != nil {
- return nil, err
+ return nil, errors.Wrapf(err, "error loading registries configuration")
+ }
+
+ if registry == nil {
+ // No configuration was found for the provided reference, so we create
+ // a fallback registry by hand to make the client creation below work
+ // as intended.
+ registry = &sysregistriesv2.Registry{
+ Endpoint: sysregistriesv2.Endpoint{
+ Location: ref.ref.String(),
+ },
+ Prefix: ref.ref.String(),
+ }
+ }
+
+ primaryDomain := reference.Domain(ref.ref)
+ // Found the registry within the sysregistriesv2 configuration. Now we test
+ // all endpoints for manifest availability. If a working image source
+ // is found, it will be used for all future pull actions.
+ manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint")
+ for _, endpoint := range append(registry.Mirrors, registry.Endpoint) {
+ logrus.Debugf("Trying to pull %q from endpoint %q", ref.ref, endpoint.Location)
+
+ newRef, err := endpoint.RewriteReference(ref.ref, registry.Prefix)
+ if err != nil {
+ return nil, err
+ }
+ dockerRef, err := newReference(newRef)
+ if err != nil {
+ return nil, err
+ }
+
+ endpointSys := sys
+ // sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
+ if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(dockerRef.ref) != primaryDomain {
+ copy := *endpointSys
+ copy.DockerAuthConfig = nil
+ endpointSys = &copy
+ }
+
+ client, err := newDockerClientFromRef(endpointSys, dockerRef, false, "pull")
+ if err != nil {
+ return nil, err
+ }
+ client.tlsClientConfig.InsecureSkipVerify = endpoint.Insecure
+
+ testImageSource := &dockerImageSource{
+ ref: dockerRef,
+ c: client,
+ }
+
+ manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx)
+ if manifestLoadErr == nil {
+ return testImageSource, nil
+ }
}
- return &dockerImageSource{
- ref: ref,
- c: c,
- }, nil
+ return nil, manifestLoadErr
}
// Reference returns the reference used to set up this source, _as specified by the user_
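
The prefix rewrite described in the doc comment boils down to a single leftmost substitution before the result is re-parsed as a named reference. A self-contained sketch of those semantics, using the illustrative values from the comment rather than the vendored `RewriteReference` itself:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative values matching the doc comment above.
	prefix := "example.com/foo"
	location := "example.com"
	ref := "example.com/foo/image:latest"

	if !strings.HasPrefix(ref, prefix) {
		panic("prefix does not match reference")
	}
	// RewriteReference performs essentially this substitution before
	// re-parsing the result as a named reference.
	rewritten := strings.Replace(ref, prefix, location, 1)
	fmt.Println(rewritten) // example.com/image:latest
}
```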
diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/docker/docker_transport.go
index 3c67efb4a..45da7c96f 100644
--- a/vendor/github.com/containers/image/docker/docker_transport.go
+++ b/vendor/github.com/containers/image/docker/docker_transport.go
@@ -61,8 +61,13 @@ func ParseReference(refString string) (types.ImageReference, error) {
// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) {
+ return newReference(ref)
+}
+
+// newReference returns a dockerReference for a named reference.
+func newReference(ref reference.Named) (dockerReference, error) {
if reference.IsNameOnly(ref) {
- return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+ return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// The docker/distribution API does not really support that (we can’t ask for an image with a specific
@@ -72,8 +77,9 @@ func NewReference(ref reference.Named) (types.ImageReference, error) {
_, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical)
if isTagged && isDigested {
- return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported")
+ return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported")
}
+
return dockerReference{
ref: ref,
}, nil
@@ -135,7 +141,7 @@ func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContex
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
- return newImageSource(sys, ref)
+ return newImageSource(ctx, sys, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.
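
Splitting `newReference` out of `NewReference` lets internal callers, such as the mirror loop in `newImageSource`, work with the concrete `dockerReference` value, while the public API keeps returning the `types.ImageReference` interface. A hedged usage sketch of the public entry point; the printed normalization assumes Docker Hub defaults:

```go
package main

import (
	"fmt"

	"github.com/containers/image/docker"
)

func main() {
	// ParseReference expects the transport-specific part, i.e. the
	// string after "docker:", which starts with "//".
	ref, err := docker.ParseReference("//busybox:latest")
	if err != nil {
		panic(err)
	}
	// For the docker transport, DockerReference() is always non-nil.
	fmt.Println(ref.DockerReference().String()) // docker.io/library/busybox:latest
}
```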
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go
index 91d4e9137..19d0a6c80 100644
--- a/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/boltdb/boltdb.go
@@ -7,9 +7,9 @@ import (
"sync"
"time"
- "github.com/boltdb/bolt"
"github.com/containers/image/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/types"
+ bolt "github.com/etcd-io/bbolt"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
diff --git a/vendor/github.com/containers/image/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
index 1e6e543b2..357333215 100644
--- a/vendor/github.com/containers/image/pkg/blobinfocache/default.go
+++ b/vendor/github.com/containers/image/pkg/blobinfocache/default.go
@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "strconv"
"github.com/containers/image/pkg/blobinfocache/boltdb"
"github.com/containers/image/pkg/blobinfocache/memory"
@@ -47,9 +48,18 @@ func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
return filepath.Join(dataDir, "containers", "cache"), nil
}
+func getRootlessUID() int {
+ uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
+ if uidEnv != "" {
+ u, _ := strconv.Atoi(uidEnv)
+ return u
+ }
+ return os.Geteuid()
+}
+
// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
- dir, err := blobInfoCacheDir(sys, os.Geteuid())
+ dir, err := blobInfoCacheDir(sys, getRootlessUID())
if err != nil {
logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
return memory.New()
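
`getRootlessUID` prefers the `_CONTAINERS_ROOTLESS_UID` environment variable over the effective UID, so the blob info cache lands in the invoking user's directory even after a re-exec into a user namespace. A minimal sketch of the same env-override pattern; parse errors silently fall back to 0, matching the ignored `Atoi` error above:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// rootlessUID mirrors getRootlessUID above: an environment override
// wins, otherwise the effective UID is used.
func rootlessUID() int {
	if v := os.Getenv("_CONTAINERS_ROOTLESS_UID"); v != "" {
		u, _ := strconv.Atoi(v)
		return u
	}
	return os.Geteuid()
}

func main() {
	os.Setenv("_CONTAINERS_ROOTLESS_UID", "1000")
	fmt.Println(rootlessUID()) // 1000
}
```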
diff --git a/vendor/github.com/containers/image/pkg/docker/config/config.go b/vendor/github.com/containers/image/pkg/docker/config/config.go
index 1f576253d..57b548e26 100644
--- a/vendor/github.com/containers/image/pkg/docker/config/config.go
+++ b/vendor/github.com/containers/image/pkg/docker/config/config.go
@@ -85,21 +85,6 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
return "", "", nil
}
-// GetUserLoggedIn returns the username logged in to registry from either
-// auth.json or XDG_RUNTIME_DIR
-// Used to tell the user if someone is logged in to the registry when logging in
-func GetUserLoggedIn(sys *types.SystemContext, registry string) (string, error) {
- path, err := getPathToAuth(sys)
- if err != nil {
- return "", err
- }
- username, _, _ := findAuthentication(registry, path, false)
- if username != "" {
- return username, nil
- }
- return "", nil
-}
-
// RemoveAuthentication deletes the credentials stored in auth.json
func RemoveAuthentication(sys *types.SystemContext, registry string) error {
return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
diff --git a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
index 3d0bb0df2..99ae65774 100644
--- a/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
+++ b/vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go
@@ -10,6 +10,10 @@ import (
"github.com/BurntSushi/toml"
"github.com/containers/image/types"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+
+ "github.com/containers/image/docker/reference"
)
// systemRegistriesConfPath is the path to the system-wide registry
@@ -22,34 +26,48 @@ var systemRegistriesConfPath = builtinRegistriesConfPath
// DO NOT change this, instead see systemRegistriesConfPath above.
const builtinRegistriesConfPath = "/etc/containers/registries.conf"
-// Mirror represents a mirror. Mirrors can be used as pull-through caches for
-// registries.
-type Mirror struct {
- // The mirror's URL.
- URL string `toml:"url"`
+// Endpoint describes a remote location of a registry.
+type Endpoint struct {
+ // The endpoint's remote location.
+ Location string `toml:"location"`
// If true, certs verification will be skipped and HTTP (non-TLS)
// connections will be allowed.
Insecure bool `toml:"insecure"`
}
+// RewriteReference substitutes the provided `prefix` of `ref` with the
+// endpoint's `location` and creates a new named reference from it.
+// The function errors if the newly created reference is not parsable.
+func (e *Endpoint) RewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
+ refString := ref.String()
+ if !refMatchesPrefix(refString, prefix) {
+ return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
+ }
+
+ newNamedRef := strings.Replace(refString, prefix, e.Location, 1)
+ newParsedRef, err := reference.ParseNamed(newNamedRef)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error rewriting reference")
+ }
+ logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String())
+ return newParsedRef, nil
+}
+
// Registry represents a registry.
type Registry struct {
- // Serializable registry URL.
- URL string `toml:"url"`
+ // A registry is an Endpoint too
+ Endpoint
// The registry's mirrors.
- Mirrors []Mirror `toml:"mirror"`
+ Mirrors []Endpoint `toml:"mirror"`
// If true, pulling from the registry will be blocked.
Blocked bool `toml:"blocked"`
- // If true, certs verification will be skipped and HTTP (non-TLS)
- // connections will be allowed.
- Insecure bool `toml:"insecure"`
// If true, the registry can be used when pulling an unqualified image.
Search bool `toml:"unqualified-search"`
// Prefix is used for matching images, and to translate one namespace to
- // another. If `Prefix="example.com/bar"`, `URL="example.com/foo/bar"`
+ // another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
// and we pull from "example.com/bar/myimage:latest", the image will
// effectively be pulled from "example.com/foo/bar/myimage:latest".
- // If no Prefix is specified, it defaults to the specified URL.
+ // If no Prefix is specified, it defaults to the specified location.
Prefix string `toml:"prefix"`
}
@@ -84,18 +102,18 @@ func (e *InvalidRegistries) Error() string {
return e.s
}
-// parseURL parses the input string, performs some sanity checks and returns
+// parseLocation parses the input string, performs some sanity checks and returns
// the sanitized input string. An error is returned if the input string is
// empty or if it contains an "http{s,}://" prefix.
-func parseURL(input string) (string, error) {
+func parseLocation(input string) (string, error) {
trimmed := strings.TrimRight(input, "/")
if trimmed == "" {
- return "", &InvalidRegistries{s: "invalid URL: cannot be empty"}
+ return "", &InvalidRegistries{s: "invalid location: cannot be empty"}
}
if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
- msg := fmt.Sprintf("invalid URL '%s': URI schemes are not supported", input)
+ msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input)
return "", &InvalidRegistries{s: msg}
}
@@ -111,21 +129,21 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) {
// to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.
registryOrder := []string{}
- getRegistry := func(url string) (*Registry, error) { // Note: _pointer_ to a long-lived object
+ getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object
var err error
- url, err = parseURL(url)
+ location, err = parseLocation(location)
if err != nil {
return nil, err
}
- reg, exists := regMap[url]
+ reg, exists := regMap[location]
if !exists {
reg = &Registry{
- URL: url,
- Mirrors: []Mirror{},
- Prefix: url,
+ Endpoint: Endpoint{Location: location},
+ Mirrors: []Endpoint{},
+ Prefix: location,
}
- regMap[url] = reg
- registryOrder = append(registryOrder, url)
+ regMap[location] = reg
+ registryOrder = append(registryOrder, location)
}
return reg, nil
}
@@ -155,15 +173,15 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) {
}
registries := []Registry{}
- for _, url := range registryOrder {
- reg := regMap[url]
+ for _, location := range registryOrder {
+ reg := regMap[location]
registries = append(registries, *reg)
}
return registries, nil
}
// postProcessRegistries checks the consistency of all registries (e.g., set
-// the Prefix to URL if not set) and applies conflict checks. It returns an
+// the Prefix to Location if not set) and applies conflict checks. It returns an
// array of cleaned registries and error in case of conflicts.
func postProcessRegistries(regs []Registry) ([]Registry, error) {
var registries []Registry
@@ -172,16 +190,16 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
for _, reg := range regs {
var err error
- // make sure URL and Prefix are valid
- reg.URL, err = parseURL(reg.URL)
+ // make sure Location and Prefix are valid
+ reg.Location, err = parseLocation(reg.Location)
if err != nil {
return nil, err
}
if reg.Prefix == "" {
- reg.Prefix = reg.URL
+ reg.Prefix = reg.Location
} else {
- reg.Prefix, err = parseURL(reg.Prefix)
+ reg.Prefix, err = parseLocation(reg.Prefix)
if err != nil {
return nil, err
}
@@ -189,13 +207,13 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
// make sure mirrors are valid
for _, mir := range reg.Mirrors {
- mir.URL, err = parseURL(mir.URL)
+ mir.Location, err = parseLocation(mir.Location)
if err != nil {
return nil, err
}
}
registries = append(registries, reg)
- regMap[reg.URL] = append(regMap[reg.URL], reg)
+ regMap[reg.Location] = append(regMap[reg.Location], reg)
}
// Given a registry can be mentioned multiple times (e.g., to have
@@ -205,15 +223,15 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
// Note: we need to iterate over the registries array to ensure a
// deterministic behavior which is not guaranteed by maps.
for _, reg := range registries {
- others, _ := regMap[reg.URL]
+ others, _ := regMap[reg.Location]
for _, other := range others {
if reg.Insecure != other.Insecure {
- msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.URL)
+ msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
return nil, &InvalidRegistries{s: msg}
}
if reg.Blocked != other.Blocked {
- msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.URL)
+ msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
return nil, &InvalidRegistries{s: msg}
}
}
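
With `Endpoint` embedded, a `Registry` is itself a pull location, and consumers try mirrors in configuration order before falling back to the registry, exactly the `append(registry.Mirrors, registry.Endpoint)` order used by `newImageSource`. A sketch with illustrative stand-in types, not the vendored structs (which carry TOML tags):

```go
package main

import "fmt"

// Minimal stand-ins for the vendored types, for illustration only.
type Endpoint struct {
	Location string
	Insecure bool
}

type Registry struct {
	Endpoint            // the registry is itself an Endpoint
	Mirrors  []Endpoint
	Prefix   string
}

func main() {
	reg := Registry{
		Endpoint: Endpoint{Location: "registry.example.com"},
		Prefix:   "registry.example.com",
		Mirrors: []Endpoint{
			{Location: "mirror-1.example.com"},
			{Location: "mirror-2.example.com", Insecure: true},
		},
	}
	// Mirrors are tried in order, with the registry itself as the
	// final fallback.
	for _, e := range append(reg.Mirrors, reg.Endpoint) {
		fmt.Println("would try:", e.Location)
	}
}
```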
diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go
index 3a6be6e00..c9a05e6c0 100644
--- a/vendor/github.com/containers/image/storage/storage_transport.go
+++ b/vendor/github.com/containers/image/storage/storage_transport.go
@@ -4,7 +4,6 @@ package storage
import (
"fmt"
- "os"
"path/filepath"
"strings"
@@ -181,7 +180,7 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
// Return the transport's previously-set store. If we don't have one
// of those, initialize one now.
if s.store == nil {
- options, err := storage.DefaultStoreOptions(os.Getuid() != 0, os.Getuid())
+ options, err := storage.DefaultStoreOptionsAutoDetectUID()
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/transports/alltransports/alltransports.go
index 23b2782f6..2335c567f 100644
--- a/vendor/github.com/containers/image/transports/alltransports/alltransports.go
+++ b/vendor/github.com/containers/image/transports/alltransports/alltransports.go
@@ -22,6 +22,7 @@ import (
// ParseImageName converts a URL-like image name to a types.ImageReference.
func ParseImageName(imgName string) (types.ImageReference, error) {
+ // Keep this in sync with TransportFromImageName!
parts := strings.SplitN(imgName, ":", 2)
if len(parts) != 2 {
return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
@@ -32,3 +33,14 @@ func ParseImageName(imgName string) (types.ImageReference, error) {
}
return transport.ParseReference(parts[1])
}
+
+// TransportFromImageName converts a URL-like name to a types.ImageTransport or nil when
+// the transport is unknown or when the input is invalid.
+func TransportFromImageName(imageName string) types.ImageTransport {
+ // Keep this in sync with ParseImageName!
+ parts := strings.SplitN(imageName, ":", 2)
+ if len(parts) == 2 {
+ return transports.Get(parts[0])
+ }
+ return nil
+}
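
A short usage sketch for the new helper; the example image names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	// The transport can be inspected without fully parsing the reference.
	if t := alltransports.TransportFromImageName("docker://busybox:latest"); t != nil {
		fmt.Println(t.Name()) // docker
	}
	// Unknown transports (or names without a colon) yield nil.
	fmt.Println(alltransports.TransportFromImageName("busybox") == nil) // true
}
```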
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
index 9fdab2314..789504348 100644
--- a/vendor/github.com/containers/image/types/types.go
+++ b/vendor/github.com/containers/image/types/types.go
@@ -401,6 +401,7 @@ type ImageInspectInfo struct {
}
// DockerAuthConfig contains authorization information for connecting to a registry.
+// The value of Username and Password can be empty when accessing the registry anonymously.
type DockerAuthConfig struct {
Username string
Password string
diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf
index 89b29722b..438cab17a 100644
--- a/vendor/github.com/containers/image/vendor.conf
+++ b/vendor/github.com/containers/image/vendor.conf
@@ -1,7 +1,7 @@
github.com/containers/image
github.com/sirupsen/logrus v1.0.0
-github.com/containers/storage v1.12.1
+github.com/containers/storage v1.12.2
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
@@ -13,7 +13,6 @@ github.com/containerd/continuity d8fb8589b0e8e85b8c8bbaa8840226d0dfeb7371
github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c
github.com/gorilla/mux 94e7d24fd285520f3d12ae998f7fdd6b5393d453
github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
-github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
@@ -43,7 +42,7 @@ github.com/syndtr/gocapability master
github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4
-github.com/boltdb/bolt master
+github.com/etcd-io/bbolt v1.3.2
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go
index 9915cb2fa..184274736 100644
--- a/vendor/github.com/containers/image/version/version.go
+++ b/vendor/github.com/containers/image/version/version.go
@@ -4,11 +4,11 @@ import "fmt"
const (
// VersionMajor is for an API incompatible changes
- VersionMajor = 0
+ VersionMajor = 1
// VersionMinor is for functionality in a backwards-compatible manner
- VersionMinor = 1
+ VersionMinor = 7
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 6
+ VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"
diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/etcd-io/bbolt/LICENSE
index 004e77fe5..004e77fe5 100644
--- a/vendor/github.com/boltdb/bolt/LICENSE
+++ b/vendor/github.com/etcd-io/bbolt/LICENSE
diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/etcd-io/bbolt/README.md
index 7d43a15b2..e9989efc5 100644
--- a/vendor/github.com/boltdb/bolt/README.md
+++ b/vendor/github.com/etcd-io/bbolt/README.md
@@ -1,5 +1,18 @@
-Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
-====
+bbolt
+=====
+
+[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt)
+[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt)
+[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt)
+[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt)
+[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases)
+[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE)
+
+bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
+store. The purpose of this fork is to provide the Go community with an active
+maintenance and development target for Bolt; the goal is improved reliability
+and stability. bbolt includes bug fixes, performance enhancements, and features
+not found in Bolt while preserving backwards compatibility with the Bolt API.
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
@@ -10,6 +23,8 @@ Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.
+[gh_ben]: https://github.com/benbjohnson
+[bolt]: https://github.com/boltdb/bolt
[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/
@@ -21,36 +36,42 @@ consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.
+## Project versioning
+
+bbolt uses [semantic versioning](http://semver.org).
+API should not change between patch and minor releases.
+New minor versions may add additional features to the API.
+
## Table of Contents
-- [Getting Started](#getting-started)
- - [Installing](#installing)
- - [Opening a database](#opening-a-database)
- - [Transactions](#transactions)
- - [Read-write transactions](#read-write-transactions)
- - [Read-only transactions](#read-only-transactions)
- - [Batch read-write transactions](#batch-read-write-transactions)
- - [Managing transactions manually](#managing-transactions-manually)
- - [Using buckets](#using-buckets)
- - [Using key/value pairs](#using-keyvalue-pairs)
- - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
- - [Iterating over keys](#iterating-over-keys)
- - [Prefix scans](#prefix-scans)
- - [Range scans](#range-scans)
- - [ForEach()](#foreach)
- - [Nested buckets](#nested-buckets)
- - [Database backups](#database-backups)
- - [Statistics](#statistics)
- - [Read-Only Mode](#read-only-mode)
- - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
-- [Resources](#resources)
-- [Comparison with other databases](#comparison-with-other-databases)
- - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
- - [LevelDB, RocksDB](#leveldb-rocksdb)
- - [LMDB](#lmdb)
-- [Caveats & Limitations](#caveats--limitations)
-- [Reading the Source](#reading-the-source)
-- [Other Projects Using Bolt](#other-projects-using-bolt)
+ - [Getting Started](#getting-started)
+ - [Installing](#installing)
+ - [Opening a database](#opening-a-database)
+ - [Transactions](#transactions)
+ - [Read-write transactions](#read-write-transactions)
+ - [Read-only transactions](#read-only-transactions)
+ - [Batch read-write transactions](#batch-read-write-transactions)
+ - [Managing transactions manually](#managing-transactions-manually)
+ - [Using buckets](#using-buckets)
+ - [Using key/value pairs](#using-keyvalue-pairs)
+ - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
+ - [Iterating over keys](#iterating-over-keys)
+ - [Prefix scans](#prefix-scans)
+ - [Range scans](#range-scans)
+ - [ForEach()](#foreach)
+ - [Nested buckets](#nested-buckets)
+ - [Database backups](#database-backups)
+ - [Statistics](#statistics)
+ - [Read-Only Mode](#read-only-mode)
+ - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
+ - [Resources](#resources)
+ - [Comparison with other databases](#comparison-with-other-databases)
+ - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
+ - [LevelDB, RocksDB](#leveldb-rocksdb)
+ - [LMDB](#lmdb)
+ - [Caveats & Limitations](#caveats--limitations)
+ - [Reading the Source](#reading-the-source)
+ - [Other Projects Using Bolt](#other-projects-using-bolt)
## Getting Started
@@ -59,13 +80,28 @@ Shopify and Heroku use Bolt-backed services every day.
To start using Bolt, install Go and run `go get`:
```sh
-$ go get github.com/boltdb/bolt/...
+$ go get go.etcd.io/bbolt/...
```
This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.
+### Importing bbolt
+
+To use bbolt as an embedded key-value store, import as:
+
+```go
+import bolt "go.etcd.io/bbolt"
+
+db, err := bolt.Open(path, 0666, nil)
+if err != nil {
+ return err
+}
+defer db.Close()
+```
+
+
### Opening a database
The top-level object in Bolt is a `DB`. It is represented as a single file on
@@ -79,7 +115,7 @@ package main
import (
"log"
- "github.com/boltdb/bolt"
+ bolt "go.etcd.io/bbolt"
)
func main() {
@@ -522,7 +558,7 @@ this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes.
By default, it will use a regular file handle which will utilize the operating
-system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
+system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.
One common use case is to backup over HTTP so you can use tools like `cURL` to
@@ -811,7 +847,7 @@ Here are a few things to note when evaluating and using Bolt:
## Reading the Source
-Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
+Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.
@@ -863,54 +899,56 @@ them via pull request.
Below is a list of public, open source projects that use Bolt:
-* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
-* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
-* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
-* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
-* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
+* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
+* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
+* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
+* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
+ simple tx and key scans.
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
-* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
-* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
+* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
+* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
+* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
+* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
+* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
-* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
-* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
+* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
+* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
-* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
-* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
-* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
-* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
-* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
-* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
-* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
+* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
+* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
+* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
+* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
-* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
-* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
-* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
+* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
+* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
backed by boltdb.
-* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
- simple tx and key scans.
-* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
-* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
-* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
-* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
-* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
-* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
-* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
-* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
-* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
-* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
-* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
-* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
-* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
-* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
-* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
-* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.
+* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/etcd-io/bbolt/bolt_386.go
index 820d533c1..4d35ee7cf 100644
--- a/vendor/github.com/boltdb/bolt/bolt_386.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_386.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/etcd-io/bbolt/bolt_amd64.go
index 98fafdb47..60a52dad5 100644
--- a/vendor/github.com/boltdb/bolt/bolt_amd64.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_amd64.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/etcd-io/bbolt/bolt_arm.go
index 7e5cb4b94..105d27ddb 100644
--- a/vendor/github.com/boltdb/bolt/bolt_arm.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_arm.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
import "unsafe"
diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/etcd-io/bbolt/bolt_arm64.go
index b26d84f91..f5aa2a5ee 100644
--- a/vendor/github.com/boltdb/bolt/bolt_arm64.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_arm64.go
@@ -1,6 +1,6 @@
// +build arm64
-package bolt
+package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/etcd-io/bbolt/bolt_linux.go
index 2b6766614..7707bcacf 100644
--- a/vendor/github.com/boltdb/bolt/bolt_linux.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_linux.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
import (
"syscall"
diff --git a/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go b/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go
new file mode 100644
index 000000000..baeb289fd
--- /dev/null
+++ b/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go
@@ -0,0 +1,12 @@
+// +build mips64 mips64le
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x8000000000 // 512GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go b/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go
new file mode 100644
index 000000000..2d9b1a91f
--- /dev/null
+++ b/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go
@@ -0,0 +1,12 @@
+// +build mips mipsle
+
+package bbolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x40000000 // 1GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go
index 7058c3d73..d7f50358e 100644
--- a/vendor/github.com/boltdb/bolt/bolt_openbsd.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
import (
"syscall"
diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/etcd-io/bbolt/bolt_ppc.go
index 645ddc3ed..69804714a 100644
--- a/vendor/github.com/boltdb/bolt/bolt_ppc.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_ppc.go
@@ -1,9 +1,12 @@
// +build ppc
-package bolt
+package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go
index 9331d9771..356590857 100644
--- a/vendor/github.com/boltdb/bolt/bolt_ppc64.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go
@@ -1,6 +1,6 @@
// +build ppc64
-package bolt
+package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go
index 8c143bc5d..422c7c69d 100644
--- a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go
@@ -1,6 +1,6 @@
// +build ppc64le
-package bolt
+package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/etcd-io/bbolt/bolt_s390x.go
index d7c39af92..6d3fcb825 100644
--- a/vendor/github.com/boltdb/bolt/bolt_s390x.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_s390x.go
@@ -1,6 +1,6 @@
// +build s390x
-package bolt
+package bbolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/etcd-io/bbolt/bolt_unix.go
index cad62dda1..5f2bb5145 100644
--- a/vendor/github.com/boltdb/bolt/bolt_unix.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_unix.go
@@ -1,41 +1,43 @@
// +build !windows,!plan9,!solaris
-package bolt
+package bbolt
import (
"fmt"
- "os"
"syscall"
"time"
"unsafe"
)
// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ fd := db.file.Fd()
+ flag := syscall.LOCK_NB
+ if exclusive {
+ flag |= syscall.LOCK_EX
+ } else {
+ flag |= syscall.LOCK_SH
+ }
for {
- // If we're beyond our timeout then return an error.
- // This can only occur after we've attempted a flock once.
- if t.IsZero() {
- t = time.Now()
- } else if timeout > 0 && time.Since(t) > timeout {
- return ErrTimeout
- }
- flag := syscall.LOCK_SH
- if exclusive {
- flag = syscall.LOCK_EX
- }
-
- // Otherwise attempt to obtain an exclusive lock.
- err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
+ // Attempt to obtain an exclusive lock.
+ err := syscall.Flock(int(fd), flag)
if err == nil {
return nil
} else if err != syscall.EWOULDBLOCK {
return err
}
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
// Wait for a bit and try again.
- time.Sleep(50 * time.Millisecond)
+ time.Sleep(flockRetryTimeout)
}
}
@@ -53,7 +55,9 @@ func mmap(db *DB, sz int) error {
}
// Advise the kernel that the mmap is accessed randomly.
- if err := madvise(b, syscall.MADV_RANDOM); err != nil {
+ err = madvise(b, syscall.MADV_RANDOM)
+ if err != nil && err != syscall.ENOSYS {
+ // Ignore not implemented error in kernel because it still works.
return fmt.Errorf("madvise: %s", err)
}
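
The reworked `flock` hoists the flag computation out of the retry loop and retries every `flockRetryTimeout` (50ms, defined in the `db.go` hunk below) until the caller's deadline passes. From the API side this is driven by `Options.Timeout`; a minimal sketch using the canonical import path from the README above, with an illustrative file name:

```go
package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// With a non-zero Timeout, Open retries the file lock periodically
	// and returns bolt.ErrTimeout once the deadline elapses, instead of
	// blocking forever on a database locked by another process.
	db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```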
diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go
index 307bf2b3e..babad6578 100644
--- a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go
@@ -1,8 +1,7 @@
-package bolt
+package bbolt
import (
"fmt"
- "os"
"syscall"
"time"
"unsafe"
@@ -11,36 +10,35 @@ import (
)
// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ fd := db.file.Fd()
+ var lockType int16
+ if exclusive {
+ lockType = syscall.F_WRLCK
+ } else {
+ lockType = syscall.F_RDLCK
+ }
for {
- // If we're beyond our timeout then return an error.
- // This can only occur after we've attempted a flock once.
- if t.IsZero() {
- t = time.Now()
- } else if timeout > 0 && time.Since(t) > timeout {
- return ErrTimeout
- }
- var lock syscall.Flock_t
- lock.Start = 0
- lock.Len = 0
- lock.Pid = 0
- lock.Whence = 0
- lock.Pid = 0
- if exclusive {
- lock.Type = syscall.F_WRLCK
- } else {
- lock.Type = syscall.F_RDLCK
- }
- err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
+ // Attempt to obtain an exclusive lock.
+ lock := syscall.Flock_t{Type: lockType}
+ err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
if err == nil {
return nil
} else if err != syscall.EAGAIN {
return err
}
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
// Wait for a bit and try again.
- time.Sleep(50 * time.Millisecond)
+ time.Sleep(flockRetryTimeout)
}
}
diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/etcd-io/bbolt/bolt_windows.go
index b00fb0720..fca178bd2 100644
--- a/vendor/github.com/boltdb/bolt/bolt_windows.go
+++ b/vendor/github.com/etcd-io/bbolt/bolt_windows.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
import (
"fmt"
@@ -16,8 +16,6 @@ var (
)
const (
- lockExt = ".lock"
-
// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
flagLockExclusive = 2
flagLockFailImmediately = 1
@@ -48,48 +46,47 @@ func fdatasync(db *DB) error {
}
// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
- // Create a separate lock file on windows because a process
- // cannot share an exclusive lock on the same file. This is
- // needed during Tx.WriteTo().
- f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
- if err != nil {
- return err
- }
- db.lockfile = f
-
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ var flag uint32 = flagLockFailImmediately
+ if exclusive {
+ flag |= flagLockExclusive
+ }
for {
- // If we're beyond our timeout then return an error.
- // This can only occur after we've attempted a flock once.
- if t.IsZero() {
- t = time.Now()
- } else if timeout > 0 && time.Since(t) > timeout {
- return ErrTimeout
- }
-
- var flag uint32 = flagLockFailImmediately
- if exclusive {
- flag |= flagLockExclusive
- }
+ // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
+ // -1..0 as the lock on the database file.
+ var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+ err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{
+ Offset: m1,
+ OffsetHigh: m1,
+ })
- err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
if err == nil {
return nil
} else if err != errLockViolation {
return err
}
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
// Wait for a bit and try again.
- time.Sleep(50 * time.Millisecond)
+ time.Sleep(flockRetryTimeout)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
- err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
- db.lockfile.Close()
- os.Remove(db.path + lockExt)
+ var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+ err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{
+ Offset: m1,
+ OffsetHigh: m1,
+ })
return err
}
diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/etcd-io/bbolt/boltsync_unix.go
index f50442523..9587afefe 100644
--- a/vendor/github.com/boltdb/bolt/boltsync_unix.go
+++ b/vendor/github.com/etcd-io/bbolt/boltsync_unix.go
@@ -1,6 +1,6 @@
// +build !windows,!plan9,!linux,!openbsd
-package bolt
+package bbolt
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/etcd-io/bbolt/bucket.go
index 0c5bf2746..84bfd4d6a 100644
--- a/vendor/github.com/boltdb/bolt/bucket.go
+++ b/vendor/github.com/etcd-io/bbolt/bucket.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
import (
"bytes"
@@ -14,13 +14,6 @@ const (
MaxValueSize = (1 << 31) - 2
)
-const (
- maxUint = ^uint(0)
- minUint = 0
- maxInt = int(^uint(0) >> 1)
- minInt = -maxInt - 1
-)
-
const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
const (
@@ -323,7 +316,12 @@ func (b *Bucket) Delete(key []byte) error {
// Move cursor to correct position.
c := b.Cursor()
- _, _, flags := c.seek(key)
+ k, _, flags := c.seek(key)
+
+ // Return nil if the key doesn't exist.
+ if !bytes.Equal(key, k) {
+ return nil
+ }
// Return an error if there is already existing bucket value.
if (flags & bucketLeafFlag) != 0 {
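
The extra key comparison makes `Bucket.Delete` a no-op for absent keys: previously the cursor seek could land on the next key in order and its flags would be inspected instead. A hedged sketch of the resulting behavior (the file name is illustrative):

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("bucket"))
		if err != nil {
			return err
		}
		// With the check above, deleting a key that was never stored
		// returns nil instead of touching a neighboring entry.
		return b.Delete([]byte("no-such-key"))
	})
	fmt.Println(err) // <nil>
}
```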
diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/etcd-io/bbolt/cursor.go
index 1be9f35e3..3000aced6 100644
--- a/vendor/github.com/boltdb/bolt/cursor.go
+++ b/vendor/github.com/etcd-io/bbolt/cursor.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
import (
"bytes"
@@ -157,12 +157,6 @@ func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
// Start from root page/node and traverse to correct page.
c.stack = c.stack[:0]
c.search(seek, c.bucket.root)
- ref := &c.stack[len(c.stack)-1]
-
- // If the cursor is pointing to the end of page/node then return nil.
- if ref.index >= ref.count() {
- return nil, nil, 0
- }
// If this is a bucket then return a nil value.
return c.keyValue()
@@ -339,6 +333,8 @@ func (c *Cursor) nsearch(key []byte) {
// keyValue returns the key and value of the current leaf element.
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
ref := &c.stack[len(c.stack)-1]
+
+ // If the cursor is pointing to the end of page/node then return nil.
if ref.count() == 0 || ref.index >= ref.count() {
return nil, nil, 0
}
diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/etcd-io/bbolt/db.go
index f352ff14f..962248c99 100644
--- a/vendor/github.com/boltdb/bolt/db.go
+++ b/vendor/github.com/etcd-io/bbolt/db.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
import (
"errors"
@@ -7,8 +7,7 @@ import (
"log"
"os"
"runtime"
- "runtime/debug"
- "strings"
+ "sort"
"sync"
"time"
"unsafe"
@@ -23,6 +22,8 @@ const version = 2
// Represents a marker value to indicate that a file is a Bolt DB.
const magic uint32 = 0xED0CDAED
+const pgidNoFreelist pgid = 0xffffffffffffffff
+
// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
// syncing changes to a file. This is required as some operating systems,
// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
@@ -39,6 +40,19 @@ const (
// default page size for db is set to the OS page size.
var defaultPageSize = os.Getpagesize()
+// The time elapsed between consecutive file locking attempts.
+const flockRetryTimeout = 50 * time.Millisecond
+
+// FreelistType is the type of the freelist backend
+type FreelistType string
+
+const (
+ // FreelistArrayType indicates backend freelist type is array
+ FreelistArrayType = FreelistType("array")
+ // FreelistMapType indicates backend freelist type is hashmap
+ FreelistMapType = FreelistType("hashmap")
+)
+
// DB represents a collection of buckets persisted to a file on disk.
// All data access is performed through transactions which can be obtained through the DB.
// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
@@ -61,6 +75,18 @@ type DB struct {
// THIS IS UNSAFE. PLEASE USE WITH CAUTION.
NoSync bool
+ // When true, skips syncing freelist to disk. This improves the database
+ // write performance under normal operation, but requires a full database
+ // re-sync during recovery.
+ NoFreelistSync bool
+
+ // FreelistType sets the backend freelist type. There are two options:
+ // array, which is simple but suffers dramatic performance degradation if
+ // the database is large and freelist fragmentation is common; and hashmap,
+ // which is faster in almost all circumstances but does not guarantee that
+ // it offers the smallest page id available. In the normal case it is safe.
+ // The default type is array.
+ FreelistType FreelistType
+
// When true, skips the truncate call when growing the database.
// Setting this to true is only safe on non-ext3/ext4 systems.
// Skipping truncation avoids preallocation of hard drive space and
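
Both new fields are plumbed through `Options` in `Open` (see the hunks below), so callers opt in per database. A minimal sketch, assuming the `Options` fields of this vendored bbolt release and the import path shown in the README above:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Skipping the freelist sync trades recovery time for write
	// throughput; the hashmap freelist speeds up page allocation on
	// large, fragmented databases, as the field comments describe.
	db, err := bolt.Open("my.db", 0600, &bolt.Options{
		NoFreelistSync: true,
		FreelistType:   bolt.FreelistMapType,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```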
@@ -96,8 +122,7 @@ type DB struct {
path string
file *os.File
- lockfile *os.File // windows only
- dataref []byte // mmap'ed readonly, write throws SEGV
+ dataref []byte // mmap'ed readonly, write throws SEGV
data *[maxMapSize]byte
datasz int
filesz int // current on disk file size
@@ -107,9 +132,11 @@ type DB struct {
opened bool
rwtx *Tx
txs []*Tx
- freelist *freelist
stats Stats
+ freelist *freelist
+ freelistLoad sync.Once
+
pagePool sync.Pool
batchMu sync.Mutex
@@ -148,14 +175,18 @@ func (db *DB) String() string {
// If the file does not exist then it will be created automatically.
// Passing in nil options will cause Bolt to open the database with the default options.
func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
- var db = &DB{opened: true}
-
+ db := &DB{
+ opened: true,
+ }
// Set default options if no options are provided.
if options == nil {
options = DefaultOptions
}
+ db.NoSync = options.NoSync
db.NoGrowSync = options.NoGrowSync
db.MmapFlags = options.MmapFlags
+ db.NoFreelistSync = options.NoFreelistSync
+ db.FreelistType = options.FreelistType
// Set default values for later DB operations.
db.MaxBatchSize = DefaultMaxBatchSize
@@ -183,7 +214,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
// if !options.ReadOnly.
// The database file is locked using the shared lock (more than one process may
// hold a lock at the same time) otherwise (options.ReadOnly is set).
- if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
+ if err := flock(db, !db.readOnly, options.Timeout); err != nil {
_ = db.close()
return nil, err
}
@@ -191,31 +222,41 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
// Default values for test hooks
db.ops.writeAt = db.file.WriteAt
+ if db.pageSize = options.PageSize; db.pageSize == 0 {
+ // Set the default page size to the OS page size.
+ db.pageSize = defaultPageSize
+ }
+
// Initialize the database if it doesn't exist.
if info, err := db.file.Stat(); err != nil {
+ _ = db.close()
return nil, err
} else if info.Size() == 0 {
// Initialize new files with meta pages.
if err := db.init(); err != nil {
+ // clean up the file descriptor on initialization failure
+ _ = db.close()
return nil, err
}
} else {
// Read the first meta page to determine the page size.
var buf [0x1000]byte
- if _, err := db.file.ReadAt(buf[:], 0); err == nil {
- m := db.pageInBuffer(buf[:], 0).meta()
- if err := m.validate(); err != nil {
- // If we can't read the page size, we can assume it's the same
- // as the OS -- since that's how the page size was chosen in the
- // first place.
- //
- // If the first page is invalid and this OS uses a different
- // page size than what the database was created with then we
- // are out of luck and cannot access the database.
- db.pageSize = os.Getpagesize()
- } else {
+ // If we can't read the page size, but can read a page, assume
+ // it's the same as the OS page size or the one given -- since that's
+ // how the page size was chosen in the first place.
+ //
+ // If the first page is invalid and this OS uses a different
+ // page size than what the database was created with then we
+ // are out of luck and cannot access the database.
+ //
+ // TODO: scan for next page
+ if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
+ if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
db.pageSize = int(m.pageSize)
}
+ } else {
+ _ = db.close()
+ return nil, ErrInvalid
}
}
@@ -232,14 +273,50 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
return nil, err
}
- // Read in the freelist.
- db.freelist = newFreelist()
- db.freelist.read(db.page(db.meta().freelist))
+ if db.readOnly {
+ return db, nil
+ }
+
+ db.loadFreelist()
+
+ // Flush freelist when transitioning from no sync to sync so
+ // NoFreelistSync-unaware boltdb can open the db later.
+ if !db.NoFreelistSync && !db.hasSyncedFreelist() {
+ tx, err := db.Begin(true)
+ if tx != nil {
+ err = tx.Commit()
+ }
+ if err != nil {
+ _ = db.close()
+ return nil, err
+ }
+ }
// Mark the database as opened and return.
return db, nil
}
+// loadFreelist reads the freelist if it is synced, or reconstructs it
+// by scanning the DB if it is not synced. It assumes there are no
+// concurrent accesses being made to the freelist.
+func (db *DB) loadFreelist() {
+ db.freelistLoad.Do(func() {
+ db.freelist = newFreelist(db.FreelistType)
+ if !db.hasSyncedFreelist() {
+ // Reconstruct free list by scanning the DB.
+ db.freelist.readIDs(db.freepages())
+ } else {
+ // Read free list from freelist page.
+ db.freelist.read(db.page(db.meta().freelist))
+ }
+ db.stats.FreePageN = db.freelist.free_count()
+ })
+}
+
+func (db *DB) hasSyncedFreelist() bool {
+ return db.meta().freelist != pgidNoFreelist
+}
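A hedged sketch of the transition behavior above (the file name and elided workload are illustrative): a database written with NoFreelistSync can be handed back to NoFreelistSync-unaware readers by reopening it once with syncing enabled, which triggers the empty read-write commit shown in Open.

package main

import (
	"log"

	bolt "github.com/etcd-io/bbolt"
)

func main() {
	// Phase 1: skip freelist syncing for faster commits.
	db, err := bolt.Open("my.db", 0600, &bolt.Options{NoFreelistSync: true})
	if err != nil {
		log.Fatal(err)
	}
	// ... write-heavy workload ...
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	// Phase 2: reopen with syncing enabled (the default); Open performs an
	// empty read-write commit that writes the freelist page back out.
	db, err = bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}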
+
// mmap opens the underlying memory-mapped file and initializes the meta references.
// minsz is the minimum size that the new mmap can be.
func (db *DB) mmap(minsz int) error {
@@ -341,9 +418,6 @@ func (db *DB) mmapSize(size int) (int, error) {
// init creates a new database file and initializes its meta pages.
func (db *DB) init() error {
- // Set the page size to the OS page size.
- db.pageSize = os.Getpagesize()
-
// Create two meta pages on a buffer.
buf := make([]byte, db.pageSize*4)
for i := 0; i < 2; i++ {
@@ -387,7 +461,8 @@ func (db *DB) init() error {
}
// Close releases all database resources.
-// All transactions must be closed before closing the database.
+// It will block waiting for any open transactions to finish
+// before closing the database and returning.
func (db *DB) Close() error {
db.rwlock.Lock()
defer db.rwlock.Unlock()
@@ -395,8 +470,8 @@ func (db *DB) Close() error {
db.metalock.Lock()
defer db.metalock.Unlock()
- db.mmaplock.RLock()
- defer db.mmaplock.RUnlock()
+ db.mmaplock.Lock()
+ defer db.mmaplock.Unlock()
return db.close()
}
@@ -526,21 +601,36 @@ func (db *DB) beginRWTx() (*Tx, error) {
t := &Tx{writable: true}
t.init(db)
db.rwtx = t
+ db.freePages()
+ return t, nil
+}
- // Free any pages associated with closed read-only transactions.
- var minid txid = 0xFFFFFFFFFFFFFFFF
- for _, t := range db.txs {
- if t.meta.txid < minid {
- minid = t.meta.txid
- }
+// freePages releases any pages associated with closed read-only transactions.
+func (db *DB) freePages() {
+ // Free all pending pages prior to earliest open transaction.
+ sort.Sort(txsById(db.txs))
+ minid := txid(0xFFFFFFFFFFFFFFFF)
+ if len(db.txs) > 0 {
+ minid = db.txs[0].meta.txid
}
if minid > 0 {
db.freelist.release(minid - 1)
}
-
- return t, nil
+ // Release unused txid extents.
+ for _, t := range db.txs {
+ db.freelist.releaseRange(minid, t.meta.txid-1)
+ minid = t.meta.txid + 1
+ }
+ db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF))
+ // Any page both allocated and freed in an extent is safe to release.
}
+type txsById []*Tx
+
+func (t txsById) Len() int { return len(t) }
+func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid }
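A standalone sketch of the extent arithmetic in freePages above (releaseExtents is a hypothetical helper, not part of bbolt):

package main

import "fmt"

// releaseExtents mirrors freePages: given the sorted txids of currently
// open read transactions, it returns the closed extents of txids whose
// pending pages are safe to release.
func releaseExtents(openTxs []uint64) [][2]uint64 {
	const max = ^uint64(0)
	var out [][2]uint64
	min := uint64(0)
	for _, id := range openTxs {
		if id > min {
			out = append(out, [2]uint64{min, id - 1})
		}
		min = id + 1
	}
	return append(out, [2]uint64{min, max})
}

func main() {
	// Readers open at txids 5 and 9: extents [0,4], [6,8], and [10,max]
	// may be released.
	fmt.Println(releaseExtents([]uint64{5, 9}))
}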
+
// removeTx removes a transaction from the database.
func (db *DB) removeTx(tx *Tx) {
// Release the read lock on the mmap.
@@ -633,11 +723,7 @@ func (db *DB) View(fn func(*Tx) error) error {
return err
}
- if err := t.Rollback(); err != nil {
- return err
- }
-
- return nil
+ return t.Rollback()
}
// Batch calls fn as part of a batch. It behaves similar to Update,
@@ -737,9 +823,7 @@ retry:
// pass success, or bolt internal errors, to all callers
for _, c := range b.calls {
- if c.err != nil {
- c.err <- err
- }
+ c.err <- err
}
break retry
}
@@ -826,7 +910,7 @@ func (db *DB) meta() *meta {
}
// allocate returns a contiguous block of memory starting at a given page.
-func (db *DB) allocate(count int) (*page, error) {
+func (db *DB) allocate(txid txid, count int) (*page, error) {
// Allocate a temporary buffer for the page.
var buf []byte
if count == 1 {
@@ -838,7 +922,7 @@ func (db *DB) allocate(count int) (*page, error) {
p.overflow = uint32(count - 1)
// Use pages from the freelist if they are available.
- if p.id = db.freelist.allocate(count); p.id != 0 {
+ if p.id = db.freelist.allocate(txid, count); p.id != 0 {
return p, nil
}
@@ -893,6 +977,38 @@ func (db *DB) IsReadOnly() bool {
return db.readOnly
}
+func (db *DB) freepages() []pgid {
+ tx, err := db.beginTx()
+ defer func() {
+ err = tx.Rollback()
+ if err != nil {
+ panic("freepages: failed to rollback tx")
+ }
+ }()
+ if err != nil {
+ panic("freepages: failed to open read only tx")
+ }
+
+ reachable := make(map[pgid]*page)
+ nofreed := make(map[pgid]bool)
+ ech := make(chan error)
+ go func() {
+ for e := range ech {
+ panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
+ }
+ }()
+ tx.checkBucket(&tx.root, reachable, nofreed, ech)
+ close(ech)
+
+ var fids []pgid
+ for i := pgid(2); i < db.meta().pgid; i++ {
+ if _, ok := reachable[i]; !ok {
+ fids = append(fids, i)
+ }
+ }
+ return fids
+}
+
// Options represents the options that can be set when opening a database.
type Options struct {
// Timeout is the amount of time to wait to obtain a file lock.
@@ -903,6 +1019,17 @@ type Options struct {
// Sets the DB.NoGrowSync flag before memory mapping the file.
NoGrowSync bool
+ // Do not sync freelist to disk. This improves the database write performance
+ // under normal operation, but requires a full database re-sync during recovery.
+ NoFreelistSync bool
+
+ // FreelistType sets the backend freelist type. There are two options:
+ // array, which is simple but suffers dramatic performance degradation when the
+ // database is large and the freelist is fragmented, and hashmap, which is faster
+ // in almost all circumstances but does not guarantee that it returns the
+ // smallest available page id. In the normal case it is safe. The default type is array.
+ FreelistType FreelistType
+
// Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
// grab a shared lock (UNIX).
ReadOnly bool
@@ -919,13 +1046,22 @@ type Options struct {
// If initialMmapSize is smaller than the previous database size,
// it takes no effect.
InitialMmapSize int
+
+ // PageSize overrides the default OS page size.
+ PageSize int
+
+ // NoSync sets the initial value of DB.NoSync. Normally this can just be
+ // set directly on the DB itself when returned from Open(), but this option
+ // is useful in APIs which expose Options but not the underlying DB.
+ NoSync bool
}
// DefaultOptions represent the options used if nil options are passed into Open().
// No timeout is used which will cause Bolt to wait indefinitely for a lock.
var DefaultOptions = &Options{
- Timeout: 0,
- NoGrowSync: false,
+ Timeout: 0,
+ NoGrowSync: false,
+ FreelistType: FreelistArrayType,
}
// Stats represents statistics about the database.
@@ -960,10 +1096,6 @@ func (s *Stats) Sub(other *Stats) Stats {
return diff
}
-func (s *Stats) add(other *Stats) {
- s.TxStats.add(&other.TxStats)
-}
-
type Info struct {
Data uintptr
PageSize int
@@ -1002,7 +1134,8 @@ func (m *meta) copy(dest *meta) {
func (m *meta) write(p *page) {
if m.root.root >= m.pgid {
panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
- } else if m.freelist >= m.pgid {
+ } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist {
+ // TODO: reject pgidNoFreelist if !NoFreelistSync
panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
}
@@ -1029,11 +1162,3 @@ func _assert(condition bool, msg string, v ...interface{}) {
panic(fmt.Sprintf("assertion failed: "+msg, v...))
}
}
-
-func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
-func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
-
-func printstack() {
- stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
- fmt.Fprintln(os.Stderr, stack)
-}
diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/etcd-io/bbolt/doc.go
index cc937845d..95f25f01c 100644
--- a/vendor/github.com/boltdb/bolt/doc.go
+++ b/vendor/github.com/etcd-io/bbolt/doc.go
@@ -1,5 +1,5 @@
/*
-Package bolt implements a low-level key/value store in pure Go. It supports
+Package bbolt implements a low-level key/value store in pure Go. It supports
fully serializable transactions, ACID semantics, and lock-free MVCC with
multiple readers and a single writer. Bolt can be used for projects that
want a simple data store without the need to add large dependencies such as
@@ -41,4 +41,4 @@ point to different data or can point to invalid memory which will cause a panic.
*/
-package bolt
+package bbolt
diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/etcd-io/bbolt/errors.go
index a3620a3eb..48758ca57 100644
--- a/vendor/github.com/boltdb/bolt/errors.go
+++ b/vendor/github.com/etcd-io/bbolt/errors.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
import "errors"
diff --git a/vendor/github.com/etcd-io/bbolt/freelist.go b/vendor/github.com/etcd-io/bbolt/freelist.go
new file mode 100644
index 000000000..93fd85d50
--- /dev/null
+++ b/vendor/github.com/etcd-io/bbolt/freelist.go
@@ -0,0 +1,370 @@
+package bbolt
+
+import (
+ "fmt"
+ "sort"
+ "unsafe"
+)
+
+// txPending holds a list of pgids and corresponding allocation txns
+// that are pending to be freed.
+type txPending struct {
+ ids []pgid
+ alloctx []txid // txids allocating the ids
+ lastReleaseBegin txid // beginning txid of last matching releaseRange
+}
+
+// pidSet holds the set of starting pgids which have the same span size
+type pidSet map[pgid]struct{}
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+ freelistType FreelistType // freelist type
+ ids []pgid // all free and available page ids.
+ allocs map[pgid]txid // mapping of txid that allocated a pgid.
+ pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
+ cache map[pgid]bool // fast lookup of all free and pending page ids.
+ freemaps map[uint64]pidSet // key is the size of a contiguous page span; value is the set of starting pgids with that span size
+ forwardMap map[pgid]uint64 // key is start pgid, value is its span size
+ backwardMap map[pgid]uint64 // key is end pgid, value is its span size
+ allocate func(txid txid, n int) pgid // the freelist allocate func
+ free_count func() int // returns the number of free pages
+ mergeSpans func(ids pgids) // merges spans of free pages into the freelist
+ getFreePageIDs func() []pgid // returns all free page ids
+ readIDs func(pgids []pgid) // reads a list of page ids and initializes the freelist
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist(freelistType FreelistType) *freelist {
+ f := &freelist{
+ freelistType: freelistType,
+ allocs: make(map[pgid]txid),
+ pending: make(map[txid]*txPending),
+ cache: make(map[pgid]bool),
+ freemaps: make(map[uint64]pidSet),
+ forwardMap: make(map[pgid]uint64),
+ backwardMap: make(map[pgid]uint64),
+ }
+
+ if freelistType == FreelistMapType {
+ f.allocate = f.hashmapAllocate
+ f.free_count = f.hashmapFreeCount
+ f.mergeSpans = f.hashmapMergeSpans
+ f.getFreePageIDs = f.hashmapGetFreePageIDs
+ f.readIDs = f.hashmapReadIDs
+ } else {
+ f.allocate = f.arrayAllocate
+ f.free_count = f.arrayFreeCount
+ f.mergeSpans = f.arrayMergeSpans
+ f.getFreePageIDs = f.arrayGetFreePageIDs
+ f.readIDs = f.arrayReadIDs
+ }
+
+ return f
+}
+
+// size returns the size of the page after serialization.
+func (f *freelist) size() int {
+ n := f.count()
+ if n >= 0xFFFF {
+ // The first element will be used to store the count. See freelist.write.
+ n++
+ }
+ return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
+}
+
+// count returns the total count of pages on the freelist (free plus pending)
+func (f *freelist) count() int {
+ return f.free_count() + f.pending_count()
+}
+
+// arrayFreeCount returns the count of free pages (array version)
+func (f *freelist) arrayFreeCount() int {
+ return len(f.ids)
+}
+
+// pending_count returns the count of pending pages
+func (f *freelist) pending_count() int {
+ var count int
+ for _, txp := range f.pending {
+ count += len(txp.ids)
+ }
+ return count
+}
+
+// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
+// f.count returns the minimum length required for dst.
+func (f *freelist) copyall(dst []pgid) {
+ m := make(pgids, 0, f.pending_count())
+ for _, txp := range f.pending {
+ m = append(m, txp.ids...)
+ }
+ sort.Sort(m)
+ mergepgids(dst, f.getFreePageIDs(), m)
+}
+
+// arrayAllocate returns the starting page id of a contiguous list of pages of a given size.
+// If a contiguous block cannot be found then 0 is returned.
+func (f *freelist) arrayAllocate(txid txid, n int) pgid {
+ if len(f.ids) == 0 {
+ return 0
+ }
+
+ var initial, previd pgid
+ for i, id := range f.ids {
+ if id <= 1 {
+ panic(fmt.Sprintf("invalid page allocation: %d", id))
+ }
+
+ // Reset initial page if this is not contiguous.
+ if previd == 0 || id-previd != 1 {
+ initial = id
+ }
+
+ // If we found a contiguous block then remove it and return it.
+ if (id-initial)+1 == pgid(n) {
+ // If we're allocating off the beginning then take the fast path
+ // and just adjust the existing slice. This will use extra memory
+ // temporarily but the append() in free() will realloc the slice
+ // as is necessary.
+ if (i + 1) == n {
+ f.ids = f.ids[i+1:]
+ } else {
+ copy(f.ids[i-n+1:], f.ids[i+1:])
+ f.ids = f.ids[:len(f.ids)-n]
+ }
+
+ // Remove from the free cache.
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, initial+i)
+ }
+ f.allocs[initial] = txid
+ return initial
+ }
+
+ previd = id
+ }
+ return 0
+}
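The scan above reduces to a run search over a sorted id list; a self-contained sketch with illustrative names (the bookkeeping of f.ids, cache, and allocs is omitted):

package main

import "fmt"

// findRun scans sorted ids for n consecutive values and returns the first
// id of the run, or 0 if no contiguous block of that length exists.
func findRun(ids []uint64, n int) uint64 {
	var initial, prev uint64
	for _, id := range ids {
		if prev == 0 || id-prev != 1 {
			initial = id // run broken; restart here
		}
		if int(id-initial)+1 == n {
			return initial
		}
		prev = id
	}
	return 0
}

func main() {
	ids := []uint64{3, 4, 5, 8, 9, 10, 11}
	fmt.Println(findRun(ids, 3)) // 3
	fmt.Println(findRun(ids, 4)) // 8
}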
+
+// free releases a page and its overflow for a given transaction id.
+// If the page is already free then a panic will occur.
+func (f *freelist) free(txid txid, p *page) {
+ if p.id <= 1 {
+ panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
+ }
+
+ // Free page and all its overflow pages.
+ txp := f.pending[txid]
+ if txp == nil {
+ txp = &txPending{}
+ f.pending[txid] = txp
+ }
+ allocTxid, ok := f.allocs[p.id]
+ if ok {
+ delete(f.allocs, p.id)
+ } else if (p.flags & freelistPageFlag) != 0 {
+ // Freelist is always allocated by prior tx.
+ allocTxid = txid - 1
+ }
+
+ for id := p.id; id <= p.id+pgid(p.overflow); id++ {
+ // Verify that page is not already free.
+ if f.cache[id] {
+ panic(fmt.Sprintf("page %d already freed", id))
+ }
+ // Add to the freelist and cache.
+ txp.ids = append(txp.ids, id)
+ txp.alloctx = append(txp.alloctx, allocTxid)
+ f.cache[id] = true
+ }
+}
+
+// release moves all page ids for a transaction id (or older) to the freelist.
+func (f *freelist) release(txid txid) {
+ m := make(pgids, 0)
+ for tid, txp := range f.pending {
+ if tid <= txid {
+ // Move transaction's pending pages to the available freelist.
+ // Don't remove from the cache since the page is still free.
+ m = append(m, txp.ids...)
+ delete(f.pending, tid)
+ }
+ }
+ f.mergeSpans(m)
+}
+
+// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
+func (f *freelist) releaseRange(begin, end txid) {
+ if begin > end {
+ return
+ }
+ var m pgids
+ for tid, txp := range f.pending {
+ if tid < begin || tid > end {
+ continue
+ }
+ // Don't recompute freed pages if the release range hasn't changed.
+ if txp.lastReleaseBegin == begin {
+ continue
+ }
+ for i := 0; i < len(txp.ids); i++ {
+ if atx := txp.alloctx[i]; atx < begin || atx > end {
+ continue
+ }
+ m = append(m, txp.ids[i])
+ txp.ids[i] = txp.ids[len(txp.ids)-1]
+ txp.ids = txp.ids[:len(txp.ids)-1]
+ txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
+ txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
+ i--
+ }
+ txp.lastReleaseBegin = begin
+ if len(txp.ids) == 0 {
+ delete(f.pending, tid)
+ }
+ }
+ f.mergeSpans(m)
+}
+
+// rollback removes the pages from a given pending tx.
+func (f *freelist) rollback(txid txid) {
+ // Remove page ids from cache.
+ txp := f.pending[txid]
+ if txp == nil {
+ return
+ }
+ var m pgids
+ for i, pgid := range txp.ids {
+ delete(f.cache, pgid)
+ tx := txp.alloctx[i]
+ if tx == 0 {
+ continue
+ }
+ if tx != txid {
+ // Pending free aborted; restore page back to alloc list.
+ f.allocs[pgid] = tx
+ } else {
+ // Freed page was allocated by this txn; OK to throw away.
+ m = append(m, pgid)
+ }
+ }
+ // Remove pages from pending list and mark as free if allocated by txid.
+ delete(f.pending, txid)
+ f.mergeSpans(m)
+}
+
+// freed returns whether a given page is in the free list.
+func (f *freelist) freed(pgid pgid) bool {
+ return f.cache[pgid]
+}
+
+// read initializes the freelist from a freelist page.
+func (f *freelist) read(p *page) {
+ if (p.flags & freelistPageFlag) == 0 {
+ panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ()))
+ }
+ // If the page.count is at the max uint16 value (64k) then it's considered
+ // an overflow and the size of the freelist is stored as the first element.
+ idx, count := 0, int(p.count)
+ if count == 0xFFFF {
+ idx = 1
+ count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
+ }
+
+ // Copy the list of page ids from the freelist.
+ if count == 0 {
+ f.ids = nil
+ } else {
+ ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
+
+ // copy the ids, so we don't modify on the freelist page directly
+ idsCopy := make([]pgid, count)
+ copy(idsCopy, ids)
+ // Make sure they're sorted.
+ sort.Sort(pgids(idsCopy))
+
+ f.readIDs(idsCopy)
+ }
+}
+
+// arrayReadIDs initializes the freelist from a given list of ids.
+func (f *freelist) arrayReadIDs(ids []pgid) {
+ f.ids = ids
+ f.reindex()
+}
+
+func (f *freelist) arrayGetFreePageIDs() []pgid {
+ return f.ids
+}
+
+// write writes the page ids onto a freelist page. All free and pending ids are
+// saved to disk since in the event of a program crash, all pending ids will
+// become free.
+func (f *freelist) write(p *page) error {
+ // Combine the old free pgids and pgids waiting on an open transaction.
+
+ // Update the header flag.
+ p.flags |= freelistPageFlag
+
+ // The page.count can only hold up to 64k elements so if we overflow that
+ // number then we handle it by putting the size in the first element.
+ lenids := f.count()
+ if lenids == 0 {
+ p.count = uint16(lenids)
+ } else if lenids < 0xFFFF {
+ p.count = uint16(lenids)
+ f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
+ } else {
+ p.count = 0xFFFF
+ ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
+ f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
+ }
+
+ return nil
+}
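read and write above share an on-disk convention: page.count is a uint16, so a freelist of 0xFFFF or more ids stores the true length in the first pgid slot. A toy encode/decode pair under that assumption (helper names are illustrative, not bbolt's):

package main

import "fmt"

// encodeCount returns the page.count value and, when the list overflows a
// uint16, the length to store in the first pgid slot.
func encodeCount(n int) (count uint16, first uint64) {
	if n < 0xFFFF {
		return uint16(n), 0
	}
	return 0xFFFF, uint64(n)
}

// decodeCount returns the index where ids start and the real id count.
func decodeCount(count uint16, first uint64) (idx, n int) {
	if count == 0xFFFF {
		return 1, int(first) // ids begin after the length element
	}
	return 0, int(count)
}

func main() {
	fmt.Println(encodeCount(10))            // 10 0
	fmt.Println(decodeCount(0xFFFF, 70000)) // 1 70000
}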
+
+// reload reads the freelist from a page and filters out pending items.
+func (f *freelist) reload(p *page) {
+ f.read(p)
+
+ // Build a cache of only pending pages.
+ pcache := make(map[pgid]bool)
+ for _, txp := range f.pending {
+ for _, pendingID := range txp.ids {
+ pcache[pendingID] = true
+ }
+ }
+
+ // Check each page in the freelist and build a new available freelist
+ // with any pages not in the pending lists.
+ var a []pgid
+ for _, id := range f.getFreePageIDs() {
+ if !pcache[id] {
+ a = append(a, id)
+ }
+ }
+
+ f.readIDs(a)
+}
+
+// reindex rebuilds the free cache based on available and pending free lists.
+func (f *freelist) reindex() {
+ ids := f.getFreePageIDs()
+ f.cache = make(map[pgid]bool, len(ids))
+ for _, id := range ids {
+ f.cache[id] = true
+ }
+ for _, txp := range f.pending {
+ for _, pendingID := range txp.ids {
+ f.cache[pendingID] = true
+ }
+ }
+}
+
+// arrayMergeSpans tries to merge a list of pages (represented by pgids) with the existing spans, using the array backend
+func (f *freelist) arrayMergeSpans(ids pgids) {
+ sort.Sort(ids)
+ f.ids = pgids(f.ids).merge(ids)
+}
diff --git a/vendor/github.com/etcd-io/bbolt/freelist_hmap.go b/vendor/github.com/etcd-io/bbolt/freelist_hmap.go
new file mode 100644
index 000000000..6a03a6c3c
--- /dev/null
+++ b/vendor/github.com/etcd-io/bbolt/freelist_hmap.go
@@ -0,0 +1,178 @@
+package bbolt
+
+import "sort"
+
+// hashmapFreeCount returns the count of free pages (hashmap version)
+func (f *freelist) hashmapFreeCount() int {
+ // use the forwardMap to get the total count
+ count := 0
+ for _, size := range f.forwardMap {
+ count += int(size)
+ }
+ return count
+}
+
+// hashmapAllocate serves the same purpose as arrayAllocate, but uses the hashmap as its backend
+func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
+ if n == 0 {
+ return 0
+ }
+
+ // if we have an exact size match, take the short path
+ if bm, ok := f.freemaps[uint64(n)]; ok {
+ for pid := range bm {
+ // remove the span
+ f.delSpan(pid, uint64(n))
+
+ f.allocs[pid] = txid
+
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, pid+pgid(i))
+ }
+ return pid
+ }
+ }
+
+ // look up the map to find a larger span
+ for size, bm := range f.freemaps {
+ if size < uint64(n) {
+ continue
+ }
+
+ for pid := range bm {
+ // remove the initial span
+ f.delSpan(pid, uint64(size))
+
+ f.allocs[pid] = txid
+
+ remain := size - uint64(n)
+
+ // add remain span
+ f.addSpan(pid+pgid(n), remain)
+
+ for i := pgid(0); i < pgid(n); i++ {
+ delete(f.cache, pid+pgid(i))
+ }
+ return pid
+ }
+ }
+
+ return 0
+}
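A toy version of the two-step lookup above, with simplified structures (the allocs and cache bookkeeping is omitted): take an exact-size span if one exists, otherwise split a larger span and re-add the remainder, dropping empty size buckets as the real delSpan does.

package main

import "fmt"

// allocate maps span size -> set of starting page ids and returns the
// start of an allocated n-page span, or 0 if none is large enough.
func allocate(freemaps map[uint64]map[uint64]bool, n uint64) uint64 {
	if bm, ok := freemaps[n]; ok { // exact size match: short path
		for pid := range bm {
			delete(bm, pid)
			if len(bm) == 0 {
				delete(freemaps, n)
			}
			return pid
		}
	}
	for size, bm := range freemaps { // otherwise split a larger span
		if size < n {
			continue
		}
		for pid := range bm {
			delete(bm, pid)
			if len(bm) == 0 {
				delete(freemaps, size)
			}
			if remain := size - n; remain > 0 {
				if freemaps[remain] == nil {
					freemaps[remain] = map[uint64]bool{}
				}
				freemaps[remain][pid+n] = true // re-add the remainder
			}
			return pid
		}
	}
	return 0
}

func main() {
	fm := map[uint64]map[uint64]bool{4: {10: true}}
	fmt.Println(allocate(fm, 3)) // 10
	fmt.Println(fm)              // map[1:map[13:true]]
}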
+
+// hashmapReadIDs reads pgids as input and initializes the freelist (hashmap version)
+func (f *freelist) hashmapReadIDs(pgids []pgid) {
+ f.init(pgids)
+
+ // Rebuild the page cache.
+ f.reindex()
+}
+
+// hashmapGetFreePageIDs returns the sorted free page ids
+func (f *freelist) hashmapGetFreePageIDs() []pgid {
+ count := f.free_count()
+ if count == 0 {
+ return nil
+ }
+
+ m := make([]pgid, 0, count)
+ for start, size := range f.forwardMap {
+ for i := 0; i < int(size); i++ {
+ m = append(m, start+pgid(i))
+ }
+ }
+ sort.Sort(pgids(m))
+
+ return m
+}
+
+// hashmapMergeSpans tries to merge a list of pages (represented by pgids) with the existing spans
+func (f *freelist) hashmapMergeSpans(ids pgids) {
+ for _, id := range ids {
+ // try to see if we can merge and update
+ f.mergeWithExistingSpan(id)
+ }
+}
+
+// mergeWithExistingSpan merges pid into the existing free spans, trying to merge it backward and forward
+func (f *freelist) mergeWithExistingSpan(pid pgid) {
+ prev := pid - 1
+ next := pid + 1
+
+ preSize, mergeWithPrev := f.backwardMap[prev]
+ nextSize, mergeWithNext := f.forwardMap[next]
+ newStart := pid
+ newSize := uint64(1)
+
+ if mergeWithPrev {
+ // merge with previous span
+ start := prev + 1 - pgid(preSize)
+ f.delSpan(start, preSize)
+
+ newStart -= pgid(preSize)
+ newSize += preSize
+ }
+
+ if mergeWithNext {
+ // merge with next span
+ f.delSpan(next, nextSize)
+ newSize += nextSize
+ }
+
+ f.addSpan(newStart, newSize)
+}
+
+func (f *freelist) addSpan(start pgid, size uint64) {
+ f.backwardMap[start-1+pgid(size)] = size
+ f.forwardMap[start] = size
+ if _, ok := f.freemaps[size]; !ok {
+ f.freemaps[size] = make(map[pgid]struct{})
+ }
+
+ f.freemaps[size][start] = struct{}{}
+}
+
+func (f *freelist) delSpan(start pgid, size uint64) {
+ delete(f.forwardMap, start)
+ delete(f.backwardMap, start+pgid(size-1))
+ delete(f.freemaps[size], start)
+ if len(f.freemaps[size]) == 0 {
+ delete(f.freemaps, size)
+ }
+}
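A self-contained sketch of the forward/backward merge implemented by mergeWithExistingSpan, addSpan, and delSpan above (types and names are illustrative; the freemaps size index is omitted):

package main

import "fmt"

type spans struct {
	forward  map[uint64]uint64 // start -> size
	backward map[uint64]uint64 // end -> size
}

func (s *spans) add(start, size uint64) {
	s.forward[start] = size
	s.backward[start+size-1] = size
}

func (s *spans) del(start, size uint64) {
	delete(s.forward, start)
	delete(s.backward, start+size-1)
}

// freePage frees one page, merging with any adjacent span on either side.
func (s *spans) freePage(pid uint64) {
	start, size := pid, uint64(1)
	if psize, ok := s.backward[pid-1]; ok { // span ends just before pid
		s.del(pid-psize, psize)
		start, size = pid-psize, size+psize
	}
	if nsize, ok := s.forward[pid+1]; ok { // span starts just after pid
		s.del(pid+1, nsize)
		size += nsize
	}
	s.add(start, size)
}

func main() {
	s := &spans{forward: map[uint64]uint64{}, backward: map[uint64]uint64{}}
	s.freePage(3)
	s.freePage(5)
	s.freePage(4) // bridges [3,3] and [5,5] into [3,5]
	fmt.Println(s.forward) // map[3:3]
}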
+
+// init initializes the freelist from a sorted list of pgids (used by the hashmap version).
+// pgids must be sorted.
+func (f *freelist) init(pgids []pgid) {
+ if len(pgids) == 0 {
+ return
+ }
+
+ size := uint64(1)
+ start := pgids[0]
+
+ if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
+ panic("pgids not sorted")
+ }
+
+ f.freemaps = make(map[uint64]pidSet)
+ f.forwardMap = make(map[pgid]uint64)
+ f.backwardMap = make(map[pgid]uint64)
+
+ for i := 1; i < len(pgids); i++ {
+ // contiguous page
+ if pgids[i] == pgids[i-1]+1 {
+ size++
+ } else {
+ f.addSpan(start, size)
+
+ size = 1
+ start = pgids[i]
+ }
+ }
+
+ // init the tail
+ if size != 0 && start != 0 {
+ f.addSpan(start, size)
+ }
+}
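The loop in init is a run-length pass over sorted ids; a standalone sketch (helper name is illustrative):

package main

import "fmt"

// toSpans groups sorted ids into start -> size spans, as init does.
func toSpans(ids []uint64) map[uint64]uint64 {
	spans := map[uint64]uint64{}
	if len(ids) == 0 {
		return spans
	}
	start, size := ids[0], uint64(1)
	for i := 1; i < len(ids); i++ {
		if ids[i] == ids[i-1]+1 {
			size++
			continue
		}
		spans[start] = size // run ended; record it
		start, size = ids[i], 1
	}
	spans[start] = size // record the tail run
	return spans
}

func main() {
	fmt.Println(toSpans([]uint64{3, 4, 5, 8, 12, 13})) // map[3:3 8:1 12:2]
}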
diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/etcd-io/bbolt/node.go
index 159318b22..6c3fa553e 100644
--- a/vendor/github.com/boltdb/bolt/node.go
+++ b/vendor/github.com/etcd-io/bbolt/node.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
import (
"bytes"
@@ -365,7 +365,7 @@ func (n *node) spill() error {
}
// Allocate contiguous space for the node.
- p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
+ p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
if err != nil {
return err
}
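The replacement formula is exact ceiling division where the old one always rounded up, even on an exact fit; a quick check of the boundary case:

package main

import "fmt"

func main() {
	size, pageSize := 4096, 4096
	oldPages := (size / pageSize) + 1            // 2 pages: over-allocates on an exact fit
	newPages := (size + pageSize - 1) / pageSize // 1 page: true ceiling division
	fmt.Println(oldPages, newPages) // 2 1
}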
diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/etcd-io/bbolt/page.go
index cde403ae8..bca9615f0 100644
--- a/vendor/github.com/boltdb/bolt/page.go
+++ b/vendor/github.com/etcd-io/bbolt/page.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
import (
"fmt"
diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/etcd-io/bbolt/tx.go
index 6700308a2..f50864142 100644
--- a/vendor/github.com/boltdb/bolt/tx.go
+++ b/vendor/github.com/etcd-io/bbolt/tx.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
import (
"fmt"
@@ -126,10 +126,7 @@ func (tx *Tx) DeleteBucket(name []byte) error {
// the error is returned to the caller.
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
return tx.root.ForEach(func(k, v []byte) error {
- if err := fn(k, tx.root.Bucket(k)); err != nil {
- return err
- }
- return nil
+ return fn(k, tx.root.Bucket(k))
})
}
@@ -169,28 +166,18 @@ func (tx *Tx) Commit() error {
// Free the old root bucket.
tx.meta.root.root = tx.root.root
- opgid := tx.meta.pgid
-
- // Free the freelist and allocate new pages for it. This will overestimate
- // the size of the freelist but not underestimate the size (which would be bad).
- tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
- p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
- if err != nil {
- tx.rollback()
- return err
- }
- if err := tx.db.freelist.write(p); err != nil {
- tx.rollback()
- return err
+ // Free the old freelist because commit writes out a fresh freelist.
+ if tx.meta.freelist != pgidNoFreelist {
+ tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
}
- tx.meta.freelist = p.id
- // If the high water mark has moved up then attempt to grow the database.
- if tx.meta.pgid > opgid {
- if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
- tx.rollback()
+ if !tx.db.NoFreelistSync {
+ err := tx.commitFreelist()
+ if err != nil {
return err
}
+ } else {
+ tx.meta.freelist = pgidNoFreelist
}
// Write dirty pages to disk.
@@ -235,6 +222,31 @@ func (tx *Tx) Commit() error {
return nil
}
+func (tx *Tx) commitFreelist() error {
+ // Allocate new pages for the new free list. This will overestimate
+ // the size of the freelist but not underestimate the size (which would be bad).
+ opgid := tx.meta.pgid
+ p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
+ if err != nil {
+ tx.rollback()
+ return err
+ }
+ if err := tx.db.freelist.write(p); err != nil {
+ tx.rollback()
+ return err
+ }
+ tx.meta.freelist = p.id
+ // If the high water mark has moved up then attempt to grow the database.
+ if tx.meta.pgid > opgid {
+ if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
+ tx.rollback()
+ return err
+ }
+ }
+
+ return nil
+}
+
// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
@@ -291,7 +303,9 @@ func (tx *Tx) close() {
}
// Copy writes the entire database to a writer.
-// This function exists for backwards compatibility. Use WriteTo() instead.
+// This function exists for backwards compatibility.
+//
+// Deprecated: Use WriteTo() instead.
func (tx *Tx) Copy(w io.Writer) error {
_, err := tx.WriteTo(w)
return err
@@ -305,7 +319,11 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
if err != nil {
return 0, err
}
- defer func() { _ = f.Close() }()
+ defer func() {
+ if cerr := f.Close(); err == nil {
+ err = cerr
+ }
+ }()
// Generate a meta page. We use the same page data for both meta pages.
buf := make([]byte, tx.db.pageSize)
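The new defer in WriteTo is the standard Go idiom for surfacing a Close error without clobbering an earlier one; a standalone sketch (file name illustrative):

package main

import (
	"log"
	"os"
)

// writeFile returns the Write error if one occurred, otherwise any error
// from the deferred Close.
func writeFile(path string, data []byte) (err error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		if cerr := f.Close(); err == nil {
			err = cerr
		}
	}()
	_, err = f.Write(data)
	return err
}

func main() {
	if err := writeFile("out.txt", []byte("hi")); err != nil {
		log.Fatal(err)
	}
}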
@@ -333,7 +351,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
}
// Move past the meta pages in the file.
- if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
+ if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
return n, fmt.Errorf("seek: %s", err)
}
@@ -344,7 +362,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
return n, err
}
- return n, f.Close()
+ return n, nil
}
// CopyFile copies the entire database to file at the given path.
@@ -379,6 +397,9 @@ func (tx *Tx) Check() <-chan error {
}
func (tx *Tx) check(ch chan error) {
+ // Force loading free list if opened in ReadOnly mode.
+ tx.db.loadFreelist()
+
// Check if any pages are double freed.
freed := make(map[pgid]bool)
all := make([]pgid, tx.db.freelist.count())
@@ -394,8 +415,10 @@ func (tx *Tx) check(ch chan error) {
reachable := make(map[pgid]*page)
reachable[0] = tx.page(0) // meta0
reachable[1] = tx.page(1) // meta1
- for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
- reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
+ if tx.meta.freelist != pgidNoFreelist {
+ for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
+ reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
+ }
}
// Recursively check buckets.
@@ -453,7 +476,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo
// allocate returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) {
- p, err := tx.db.allocate(count)
+ p, err := tx.db.allocate(tx.meta.txid, count)
if err != nil {
return nil, err
}
@@ -462,7 +485,7 @@ func (tx *Tx) allocate(count int) (*page, error) {
tx.pages[p.id] = p
// Update statistics.
- tx.stats.PageCount++
+ tx.stats.PageCount += count
tx.stats.PageAlloc += count * tx.db.pageSize
return p, nil