diff --git a/client/v2_2/docker_image_.py b/client/v2_2/docker_image_.py index 2025a5036..6319aa3f4 100755 --- a/client/v2_2/docker_image_.py +++ b/client/v2_2/docker_image_.py @@ -277,6 +277,9 @@ def _tags(self): def tags(self): return self._tags().get('tags', []) + def name(self): + return self._name + def manifests(self): payload = self._tags() if 'manifest' not in payload: diff --git a/client/v2_2/save_.py b/client/v2_2/save_.py index da2dca90e..03f40a863 100755 --- a/client/v2_2/save_.py +++ b/client/v2_2/save_.py @@ -22,6 +22,7 @@ import io import json import os +import sys import tarfile import concurrent.futures @@ -144,7 +145,8 @@ def tarball(name, image, def fast(image, directory, threads = 1, - cache_directory = None): + cache_directory = None, + print_progress = False): """Produce a FromDisk compatible file layout under the provided directory. After calling this, the following filesystem will exist: @@ -166,6 +168,7 @@ def fast(image, directory: an existing empty directory under which to save the layout. threads: the number of threads to use when performing the upload. cache_directory: directory that stores file cache. + print_progress: whether to print pull status messages to stderr. 
Returns: A tuple whose first element is the path to the config file, and whose second @@ -174,13 +177,15 @@ def fast(image, """ def write_file(name, accessor, - arg): + arg, message = None): + if print_progress and message is not None: + sys.stderr.write(message + "\n") with io.open(name, u'wb') as f: f.write(accessor(arg)) def write_file_and_store(name, accessor, - arg, cached_layer): - write_file(cached_layer, accessor, arg) + arg, cached_layer, message = None): + write_file(cached_layer, accessor, arg, message) link(cached_layer, name) def link(source, dest): @@ -225,11 +230,13 @@ def valid(cached_layer, digest): 'unused') idx = 0 + num_layers = len(image.fs_layers()) layers = [] for blob in reversed(image.fs_layers()): # Create a local copy layer_name = os.path.join(directory, '%03d.tar.gz' % idx) digest_name = os.path.join(directory, '%03d.sha256' % idx) + message = 'Downloading from {} (layer {}/{})'.format(image.name(), idx+1, num_layers) # Strip the sha256: prefix digest = blob[7:].encode('utf8') f = executor.submit( @@ -247,10 +254,10 @@ def valid(cached_layer, digest): future_to_params[f] = layer_name else: f = executor.submit(write_file_and_store, layer_name, image.blob, - blob, cached_layer) + blob, cached_layer, message) future_to_params[f] = layer_name else: - f = executor.submit(write_file, layer_name, image.blob, blob) + f = executor.submit(write_file, layer_name, image.blob, blob, message) future_to_params[f] = layer_name layers.append((digest_name, layer_name)) diff --git a/tools/fast_puller_.py b/tools/fast_puller_.py index d7601d2bc..310281e98 100755 --- a/tools/fast_puller_.py +++ b/tools/fast_puller_.py @@ -62,6 +62,9 @@ 'located. 
Overiddes the value from DOCKER_CONFIG') parser.add_argument( '--cache', action='store', help='Image\'s files cache directory.') +parser.add_argument( + '--print-progress', action='store_true', help='Print pull progress to stderr.') + _THREADS = 8 @@ -125,7 +128,8 @@ def main(): v2_2_img, args.directory, threads=_THREADS, - cache_directory=args.cache) + cache_directory=args.cache, + print_progress=args.print_progress) return logging.info('Pulling v2 image from %r ...', name) @@ -135,7 +139,8 @@ def main(): v2_2_img, args.directory, threads=_THREADS, - cache_directory=args.cache) + cache_directory=args.cache, + print_progress=args.print_progress) return # pylint: disable=broad-except except Exception as e: