Skip to content
Snippets Groups Projects
Commit 4d1cd450 authored by hannandarryl's avatar hannandarryl
Browse files

fixed transforms in video loader

parent 092b5e42
No related branches found
No related tags found
No related merge requests found
......@@ -20,6 +20,7 @@ from torch import nn
import torchvision.transforms.functional as tv_f
import csv
import random
import cv2
# def get_augmented_examples(clip):
# augmented_examples = []
......@@ -226,7 +227,7 @@ class YoloClipLoader(Dataset):
self.labels = [name for name in listdir(yolo_output_path) if isdir(join(yolo_output_path, name))]
self.clips = []
if exists(clip_cache_file) and sparse_model:
if exists(clip_cache_file):
self.clips = torch.load(open(clip_cache_file, 'rb'))
else:
for label in self.labels:
......@@ -234,6 +235,8 @@ class YoloClipLoader(Dataset):
videos = list(listdir(join(yolo_output_path, label)))
for vi in tqdm(range(len(videos))):
video = videos[vi]
counter = 0
all_trimmed = []
with open(abspath(join(yolo_output_path, label, video, 'result.json'))) as fin:
results = json.load(fin)
max_frame = len(results)
......@@ -268,7 +271,7 @@ class YoloClipLoader(Dataset):
final_clip = frames[:, :, lower_y:upper_y, lower_x:upper_x]
if self.transform and sparse_model:
if self.transform:
final_clip = self.transform(final_clip)
if sparse_model:
......
Loading 0% — or the content failed to load.
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment