path: root/pkg/machine/pull.go
author    Ashley Cui <acui@redhat.com>    2022-07-08 20:10:25 -0400
committer Matthew Heon <matthew.heon@pm.me>    2022-07-26 13:32:33 -0400
commit    17dbce2fb060b4803b2dae4eb6b78fdebea5b61f (patch)
tree      d573c5222cd767a14e1b8bc0519e4ccc23bc6663 /pkg/machine/pull.go
parent    e473c5e4b741cef2c1174cb4ec51000f443e6877 (diff)
Clean up cached machine images
When initializing machines, we download a machine image, then uncompress and copy it to create the actual VM image. When a user repeatedly pulls new machines, old, unused machine images can build up. This commit cleans up those unused cached images.

Changes:
- If the machine image is pulled from a URL or from the FCOS releases, we pull it into XDG_DATA_HOME/containers/podman/machine/vmType/cache
- Cache cleanups only happen on a cache miss, when we need to pull a new image
- For Fedora and FCOS, we actually use the cache, so on a cache miss we go through the cache dir and remove any images older than 2 weeks (FCOS's release cycle)
- For generic files pulled from a URL, we don't actually cache, so we delete the pulled file immediately after creating the machine image
- For generic files from a local path, the original file is never cleaned up

Note that because we now cache in a different dir, this will not clean up old images pulled before this commit.

[NO NEW TESTS NEEDED]

Signed-off-by: Ashley Cui <acui@redhat.com>
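The commit message describes the new cache location as XDG_DATA_HOME/containers/podman/machine/vmType/cache, and the diff below only shows a GetCacheDir(vmType) call that resolves it. As a rough illustration of that layout, here is a minimal, self-contained Go sketch; the helper name getMachineCacheDir and its fallback behavior are assumptions for this example, not podman's actual implementation.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// getMachineCacheDir is a hypothetical stand-in for the GetCacheDir call used
// in the diff; it mirrors the layout described in the commit message:
// XDG_DATA_HOME/containers/podman/machine/<vmType>/cache.
func getMachineCacheDir(vmType string) (string, error) {
	dataHome := os.Getenv("XDG_DATA_HOME")
	if dataHome == "" {
		// Fall back to the XDG default of ~/.local/share.
		home, err := os.UserHomeDir()
		if err != nil {
			return "", err
		}
		dataHome = filepath.Join(home, ".local", "share")
	}
	cacheDir := filepath.Join(dataHome, "containers", "podman", "machine", vmType, "cache")
	// Make sure the directory exists before images are pulled into it.
	if err := os.MkdirAll(cacheDir, 0755); err != nil {
		return "", err
	}
	return cacheDir, nil
}

func main() {
	dir, err := getMachineCacheDir("qemu")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("machine image cache:", dir)
}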
Diffstat (limited to 'pkg/machine/pull.go')
-rw-r--r--   pkg/machine/pull.go   54
1 file changed, 46 insertions(+), 8 deletions(-)
diff --git a/pkg/machine/pull.go b/pkg/machine/pull.go
index 7e6f01bad..08baa7df8 100644
--- a/pkg/machine/pull.go
+++ b/pkg/machine/pull.go
@@ -5,6 +5,7 @@ package machine
 import (
 	"bufio"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -39,6 +40,10 @@ func NewGenericDownloader(vmType, vmName, pullPath string) (DistributionDownload
 	if err != nil {
 		return nil, err
 	}
+	cacheDir, err := GetCacheDir(vmType)
+	if err != nil {
+		return nil, err
+	}
 	dl := Download{}
 	// Is pullpath a file or url?
 	getURL, err := url2.Parse(pullPath)
@@ -48,25 +53,23 @@ func NewGenericDownloader(vmType, vmName, pullPath string) (DistributionDownload
 	if len(getURL.Scheme) > 0 {
 		urlSplit := strings.Split(getURL.Path, "/")
 		imageName = urlSplit[len(urlSplit)-1]
-		dl.LocalUncompressedFile = filepath.Join(dataDir, imageName)
 		dl.URL = getURL
-		dl.LocalPath = filepath.Join(dataDir, imageName)
+		dl.LocalPath = filepath.Join(cacheDir, imageName)
 	} else {
 		// Dealing with FilePath
 		imageName = filepath.Base(pullPath)
-		dl.LocalUncompressedFile = filepath.Join(dataDir, imageName)
 		dl.LocalPath = pullPath
 	}
 	dl.VMName = vmName
 	dl.ImageName = imageName
+	dl.LocalUncompressedFile = filepath.Join(dataDir, imageName)
 	// The download needs to be pulled into the datadir
 	gd := GenericDownload{Download: dl}
-	gd.LocalUncompressedFile = gd.getLocalUncompressedName()
 	return gd, nil
 }
 
-func (d Download) getLocalUncompressedName() string {
+func (d Download) getLocalUncompressedFile(dataDir string) string {
 	var (
 		extension string
 	)
@@ -78,8 +81,8 @@ func (d Download) getLocalUncompressedName() string {
 	case strings.HasSuffix(d.LocalPath, ".xz"):
 		extension = ".xz"
 	}
-	uncompressedFilename := filepath.Join(filepath.Dir(d.LocalPath), d.VMName+"_"+d.ImageName)
-	return strings.TrimSuffix(uncompressedFilename, extension)
+	uncompressedFilename := d.VMName + "_" + d.ImageName
+	return filepath.Join(dataDir, strings.TrimSuffix(uncompressedFilename, extension))
 }
 
 func (g GenericDownload) Get() *Download {
@@ -91,6 +94,18 @@ func (g GenericDownload) HasUsableCache() (bool, error) {
 	return g.URL == nil, nil
 }
 
+// CleanCache cleans out downloaded uncompressed image files
+func (g GenericDownload) CleanCache() error {
+	// Remove any image that has been downloaded via URL
+	// We never read from cache for generic downloads
+	if g.URL != nil {
+		if err := os.Remove(g.LocalPath); err != nil && !errors.Is(err, os.ErrNotExist) {
+			return err
+		}
+	}
+	return nil
+}
+
 func DownloadImage(d DistributionDownload) error {
 	// check if the latest image is already present
 	ok, err := d.HasUsableCache()
@@ -101,8 +116,14 @@ func DownloadImage(d DistributionDownload) error {
 		if err := DownloadVMImage(d.Get().URL, d.Get().LocalPath); err != nil {
 			return err
 		}
+		// Clean out old cached images, since we didn't find needed image in cache
+		defer func() {
+			if err = d.CleanCache(); err != nil {
+				logrus.Warnf("error cleaning machine image cache: %s", err)
+			}
+		}()
 	}
-	return Decompress(d.Get().LocalPath, d.Get().getLocalUncompressedName())
+	return Decompress(d.Get().LocalPath, d.Get().LocalUncompressedFile)
 }
 
 // DownloadVMImage downloads a VM image from url to given path
@@ -253,3 +274,20 @@ func decompressEverythingElse(src string, output io.WriteCloser) error {
 	_, err = io.Copy(output, uncompressStream)
 	return err
 }
+
+func removeImageAfterExpire(dir string, expire time.Duration) error {
+	now := time.Now()
+	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+		// Delete any cache files that are older than expiry date
+		if !info.IsDir() && (now.Sub(info.ModTime()) > expire) {
+			err := os.Remove(path)
+			if err != nil && !errors.Is(err, os.ErrNotExist) {
+				logrus.Warnf("unable to clean up cached image: %s", path)
+			} else {
+				logrus.Debugf("cleaning up cached image: %s", path)
+			}
+		}
+		return nil
+	})
+	return err
+}
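The removeImageAfterExpire helper added at the end of this diff walks a directory and removes regular files whose modification time is older than the given duration. According to the commit message, the Fedora/FCOS downloaders (defined outside pull.go) call it on a cache miss with a roughly two-week expiry, matching FCOS's release cycle. A minimal sketch of how such a cache-backed downloader could wire it up follows; the type and field names are assumptions for illustration only, and the fragment assumes it sits in the same package as removeImageAfterExpire with "time" already imported.

// Hypothetical sketch, not code from this commit: a cache-backed downloader
// could invoke the new helper from its CleanCache method on a cache miss.
type cachedDownload struct {
	cacheDir string // e.g. $XDG_DATA_HOME/containers/podman/machine/qemu/cache
}

func (c cachedDownload) CleanCache() error {
	// FCOS releases roughly every two weeks, so cached images older than one
	// release cycle are unlikely to be reused.
	const expire = 14 * 24 * time.Hour
	return removeImageAfterExpire(c.cacheDir, expire)
}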