diff --git a/sparse_coding_torch/video_loader.py b/sparse_coding_torch/video_loader.py
index 647e70f894942a2cfdf25cb6ac0cfc9ece397e46..a318a1c4da9d87b685cf419f3d161ad3a0284208 100644
--- a/sparse_coding_torch/video_loader.py
+++ b/sparse_coding_torch/video_loader.py
@@ -20,6 +20,7 @@ from torch import nn
 import torchvision.transforms.functional as tv_f
 import csv
 import random
+import cv2
 
 # def get_augmented_examples(clip):
 #     augmented_examples = []
@@ -226,7 +227,7 @@ class YoloClipLoader(Dataset):
         self.labels = [name for name in listdir(yolo_output_path) if isdir(join(yolo_output_path, name))]
         self.clips = []
 
-        if exists(clip_cache_file) and sparse_model:
+        if exists(clip_cache_file):
             self.clips = torch.load(open(clip_cache_file, 'rb'))
         else:
             for label in self.labels:
@@ -234,6 +235,8 @@ class YoloClipLoader(Dataset):
                 videos = list(listdir(join(yolo_output_path, label)))
                 for vi in tqdm(range(len(videos))):
                     video = videos[vi]
+                    counter = 0
+                    all_trimmed = []
                     with open(abspath(join(yolo_output_path, label, video, 'result.json'))) as fin:
                         results = json.load(fin)
                         max_frame = len(results)
@@ -268,7 +271,7 @@ class YoloClipLoader(Dataset):
 
                         final_clip = frames[:, :, lower_y:upper_y, lower_x:upper_x]
 
-                        if self.transform and sparse_model:
+                        if self.transform:
                             final_clip = self.transform(final_clip)
 
                         if sparse_model: