From 4d1cd450de322c77c4b7a8fdb1fe5ed930c06b11 Mon Sep 17 00:00:00 2001
From: hannandarryl <hannandarryl@gmail.com>
Date: Fri, 11 Feb 2022 14:05:55 +0000
Subject: [PATCH] Fix transforms in video loader

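Previously both the clip cache and the crop transform were gated on
sparse_model: cached clips were only loaded, and self.transform was only
applied, when a sparse model was supplied. Decouple both checks so the
cache is used whenever clip_cache_file exists and the transform runs on
every cropped clip. Also add the cv2 import and initialize counter and
all_trimmed per video.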
---
 sparse_coding_torch/video_loader.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/sparse_coding_torch/video_loader.py b/sparse_coding_torch/video_loader.py
index 647e70f..a318a1c 100644
--- a/sparse_coding_torch/video_loader.py
+++ b/sparse_coding_torch/video_loader.py
@@ -20,6 +20,7 @@ from torch import nn
 import torchvision.transforms.functional as tv_f
 import csv
 import random
+import cv2
 
 # def get_augmented_examples(clip):
 #     augmented_examples = []
@@ -226,7 +227,7 @@ class YoloClipLoader(Dataset):
          
         self.labels = [name for name in listdir(yolo_output_path) if isdir(join(yolo_output_path, name))]
         self.clips = []
-        if exists(clip_cache_file) and sparse_model:
+        if exists(clip_cache_file):
             self.clips = torch.load(open(clip_cache_file, 'rb'))
         else:
             for label in self.labels:
@@ -234,6 +235,8 @@ class YoloClipLoader(Dataset):
                 videos = list(listdir(join(yolo_output_path, label)))
                 for vi in tqdm(range(len(videos))):
                     video = videos[vi]
+                    counter = 0
+                    all_trimmed = []
                     with open(abspath(join(yolo_output_path, label, video, 'result.json'))) as fin:
                         results = json.load(fin)
                         max_frame = len(results)
@@ -268,7 +271,7 @@ class YoloClipLoader(Dataset):
 
                                 final_clip = frames[:, :, lower_y:upper_y, lower_x:upper_x]
 
-                                if self.transform and sparse_model:
+                                if self.transform:
                                     final_clip = self.transform(final_clip)
 
                                 if sparse_model:
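
For review context, a minimal sketch of the decoupled behavior this patch
produces. The helper names and signatures (load_or_build_clips, prepare_clip,
build_clips) are illustrative only, not code from the repository:

from os.path import exists
import torch

def load_or_build_clips(clip_cache_file, build_clips, save_cache=True):
    # Cache reuse no longer requires a sparse model: load whenever the
    # cache file exists (the old check also demanded sparse_model).
    if exists(clip_cache_file):
        return torch.load(clip_cache_file)
    clips = build_clips()  # expensive extraction pass over the videos
    if save_cache:
        torch.save(clips, clip_cache_file)
    return clips

def prepare_clip(final_clip, transform, sparse_model=None):
    # The transform now runs whenever one is configured, even with no
    # sparse model; sparse coding stays a separate, optional step.
    if transform:
        final_clip = transform(final_clip)
    if sparse_model:
        pass  # sparse coding of the transformed clip would happen here
    return final_clip
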
-- 
GitLab