diff --git a/scripts/convert.py b/scripts/convert.py
index 4956172517..9f07bfb02d 100644
--- a/scripts/convert.py
+++ b/scripts/convert.py
@@ -791,7 +791,7 @@ def _get_io_sizes(self) -> Dict[str, int]:
         input_shape = [input_shape] if not isinstance(input_shape, list) else input_shape
         output_shape = self._model.model.output_shape
         output_shape = [output_shape] if not isinstance(output_shape, list) else output_shape
-        retval = dict(input=input_shape[0][1], output=output_shape[-1][1])
+        retval = {"input": input_shape[0][1], "output": output_shape[-1][1]}
         logger.debug(retval)
         return retval
 
@@ -833,7 +833,6 @@ def _get_batchsize(self, queue_size: int) -> int:
         is_cpu = GPUStats().device_count == 0
         batchsize = 1 if is_cpu else self._model.config["convert_batchsize"]
         batchsize = min(queue_size, batchsize)
-        logger.debug("Batchsize: %s", batchsize)
         logger.debug("Got batchsize: %s", batchsize)
         return batchsize
 
diff --git a/tools/preview/preview.py b/tools/preview/preview.py
index a5e83df556..f6ab16636e 100644
--- a/tools/preview/preview.py
+++ b/tools/preview/preview.py
@@ -302,7 +302,7 @@ def __init__(self, app: Preview, arguments: "Namespace", sample_size: int) -> No
         self._indices = self._get_indices()
         self._predictor = Predict(queue_manager.get_queue("preview_predict_in"),
-                                  sample_size,
+                                  self._sample_size,
                                   arguments)
         self._app._display.set_centering(self._predictor.centering)
         self.generate()
 
@@ -385,6 +385,7 @@ def _get_indices(self) -> List[List[int]]:
         """
         # Remove start and end values to get a list divisible by self.sample_size
         no_files = len(self._filelist)
+        self._sample_size = min(self._sample_size, no_files)
         crop = no_files % self._sample_size
         top_tail = list(range(no_files))[
             crop // 2:no_files - (crop - (crop // 2))]