From 87ec68c4f66a5d1f32aefeaf590d945257a04e04 Mon Sep 17 00:00:00 2001
From: hannandarryl <hannandarryl@gmail.com>
Date: Fri, 24 Jun 2022 14:26:09 +0000
Subject: [PATCH] Update ONSD classifier, data loading, and training defaults

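Load pretrained sparse-coding filters from a checkpoint and apply them as a
fixed first convolution in the ONSD classifier, point the ONSD loader at the
revised dataset, broaden the ONSD augmentations, and switch the sparse model
to single-frame clips (kernel_depth=1, clip_depth=1). Swap the PNB
augmentations from rotation to color jitter, sharpness, and slight
translation, and comment out the legacy TF1 session/GPU-growth setup in the
training and YOLOv4 scripts.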
---
 sparse_coding_torch/onsd/classifier_model.py  | 14 +++++++---
 sparse_coding_torch/onsd/load_data.py         |  6 ++++--
 sparse_coding_torch/onsd/train_classifier.py  | 27 ++++++++++---------
 .../onsd/train_sparse_model.py                | 15 ++++++-----
 sparse_coding_torch/pnb/load_data.py          |  8 +++---
 sparse_coding_torch/pnb/train_classifier.py   | 22 +++++++++++----
 yolov4/core/yolov4.py                         |  4 +--
 yolov4/detect.py                              | 12 ++++-----
 yolov4/get_bounding_boxes.py                  |  2 +-
 9 files changed, 66 insertions(+), 44 deletions(-)

diff --git a/sparse_coding_torch/onsd/classifier_model.py b/sparse_coding_torch/onsd/classifier_model.py
index 1162dc1..e11cd4b 100644
--- a/sparse_coding_torch/onsd/classifier_model.py
+++ b/sparse_coding_torch/onsd/classifier_model.py
@@ -9,11 +9,14 @@ import torch.nn as nn
 from sparse_coding_torch.utils import VideoGrayScaler, MinMaxScaler
     
 class ONSDClassifier(keras.layers.Layer):
-    def __init__(self):
+    def __init__(self, sparse_checkpoint):
         super(ONSDClassifier, self).__init__()
+        
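+        # Pretrained sparse-coding dictionary loaded from the checkpoint; a plain tensor, so it stays fixed (not trainable).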
+        self.sparse_filters = tf.squeeze(keras.models.load_model(sparse_checkpoint).weights[0], axis=0)
 
-        self.conv_1 = keras.layers.Conv2D(24, kernel_size=8, strides=4, activation='relu', padding='valid')
-        self.conv_2 = keras.layers.Conv2D(24, kernel_size=4, strides=2, activation='relu', padding='valid')
+        self.conv_1 = keras.layers.Conv2D(48, kernel_size=8, strides=2, activation='relu', padding='valid')
+        self.conv_2 = keras.layers.Conv2D(64, kernel_size=4, strides=2, activation='relu', padding='valid')
 
         self.flatten = keras.layers.Flatten()
 
@@ -27,7 +30,10 @@ class ONSDClassifier(keras.layers.Layer):
 
 #     @tf.function
     def call(self, activations):
-        x = self.conv_1(activations)
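+        # Apply the fixed sparse filters (conv + ReLU), then the learned conv stack.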
+        x = tf.nn.conv2d(activations, self.sparse_filters, strides=4, padding='VALID')
+        x = tf.nn.relu(x)
+        x = self.conv_1(x)
         x = self.conv_2(x)
         x = self.flatten(x)
 #         x = self.ff_1(x)
diff --git a/sparse_coding_torch/onsd/load_data.py b/sparse_coding_torch/onsd/load_data.py
index de53a52..8ab3b7c 100644
--- a/sparse_coding_torch/onsd/load_data.py
+++ b/sparse_coding_torch/onsd/load_data.py
@@ -10,7 +10,7 @@ import csv
 from sklearn.model_selection import train_test_split, GroupShuffleSplit, LeaveOneGroupOut, LeaveOneOut, StratifiedGroupKFold, StratifiedKFold, KFold, ShuffleSplit
     
 def load_onsd_videos(batch_size, input_size, yolo_model=None, mode=None, n_splits=None):   
-    video_path = "/shared_data/bamc_onsd_data/preliminary_onsd_data"
+    video_path = "/shared_data/bamc_onsd_data/revised_onsd_data"
     
     transforms = torchvision.transforms.Compose(
     [torchvision.transforms.Grayscale(1),
@@ -18,7 +18,9 @@ def load_onsd_videos(batch_size, input_size, yolo_model=None, mode=None, n_split
      torchvision.transforms.Resize(input_size[:2])
     ])
     augment_transforms = torchvision.transforms.Compose(
-    [torchvision.transforms.RandomRotation(15)
+    [torchvision.transforms.RandomRotation(45),
+     torchvision.transforms.RandomHorizontalFlip(0.5),
+     torchvision.transforms.RandomAdjustSharpness(0.05)
     ])
     dataset = ONSDLoader(video_path, input_size[1], input_size[0], transform=transforms, augmentation=augment_transforms, yolo_model=yolo_model)
     
diff --git a/sparse_coding_torch/onsd/train_classifier.py b/sparse_coding_torch/onsd/train_classifier.py
index 210da9b..5427e92 100644
--- a/sparse_coding_torch/onsd/train_classifier.py
+++ b/sparse_coding_torch/onsd/train_classifier.py
@@ -1,3 +1,6 @@
+import tensorflow.keras as keras
+import tensorflow as tf
+# tf.debugging.set_log_device_placement(True)
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
@@ -14,20 +17,20 @@ import numpy as np
 from sklearn.metrics import f1_score, accuracy_score, confusion_matrix
 import random
 import pickle
-import tensorflow.keras as keras
-import tensorflow as tf
-from sparse_coding_torch.onsd.train_sparse_model import sparse_loss
+# from sparse_coding_torch.onsd.train_sparse_model import sparse_loss
 from yolov4.get_bounding_boxes import YoloModel
 import torchvision
 from sparse_coding_torch.utils import VideoGrayScaler, MinMaxScaler
 import glob
 import cv2
 
-configproto = tf.compat.v1.ConfigProto()
-configproto.gpu_options.polling_inactive_delay_msecs = 5000
-configproto.gpu_options.allow_growth = True
-sess = tf.compat.v1.Session(config=configproto) 
-tf.compat.v1.keras.backend.set_session(sess)
+# configproto = tf.compat.v1.ConfigProto()
+# configproto.gpu_options.polling_inactive_delay_msecs = 5000
+# configproto.gpu_options.allow_growth = True
+# sess = tf.compat.v1.Session(config=configproto) 
+# session = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=True))
+# tf.compat.v1.keras.backend.set_session(sess)
+# tf.debugging.set_log_device_placement(True)
 
 def calculate_onsd_scores(input_videos, labels, yolo_model, classifier_model, transform):
     all_predictions = []
@@ -99,12 +102,12 @@ if __name__ == "__main__":
     parser.add_argument('--save_train_test_splits', action='store_true')
     parser.add_argument('--run_2d', action='store_true')
     parser.add_argument('--balance_classes', action='store_true')
-    parser.add_argument('--dataset', default='pnb', type=str)
+    parser.add_argument('--dataset', default='onsd', type=str)
     parser.add_argument('--train_sparse', action='store_true')
     parser.add_argument('--mixing_ratio', type=float, default=1.0)
     parser.add_argument('--sparse_lr', type=float, default=0.003)
-    parser.add_argument('--crop_height', type=int, default=285)
-    parser.add_argument('--crop_width', type=int, default=350)
+    parser.add_argument('--crop_height', type=int, default=400)
+    parser.add_argument('--crop_width', type=int, default=400)
     parser.add_argument('--scale_factor', type=int, default=1)
     parser.add_argument('--clip_depth', type=int, default=5)
     parser.add_argument('--frames_to_skip', type=int, default=1)
@@ -174,7 +177,7 @@ if __name__ == "__main__":
             classifier_model = keras.models.load_model(args.checkpoint)
         else:
             classifier_inputs = keras.Input(shape=(image_height, image_width, 1))
-            classifier_outputs = ONSDClassifier()(classifier_inputs)
+            classifier_outputs = ONSDClassifier(args.sparse_checkpoint)(classifier_inputs)
 
             classifier_model = keras.Model(inputs=classifier_inputs, outputs=classifier_outputs)
 
diff --git a/sparse_coding_torch/onsd/train_sparse_model.py b/sparse_coding_torch/onsd/train_sparse_model.py
index 0352d53..c439de7 100644
--- a/sparse_coding_torch/onsd/train_sparse_model.py
+++ b/sparse_coding_torch/onsd/train_sparse_model.py
@@ -12,6 +12,7 @@ import tensorflow.keras as keras
 import tensorflow as tf
 from sparse_coding_torch.sparse_model import normalize_weights_3d, normalize_weights, SparseCode, load_pytorch_weights, ReconSparse
 import random
+from sparse_coding_torch.utils import plot_filters
 
 def sparse_loss(images, recon, activations, batch_size, lam, stride):
     loss = 0.5 * (1/batch_size) * tf.math.reduce_sum(tf.math.pow(images - recon, 2))
@@ -22,7 +23,7 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument('--batch_size', default=32, type=int)
     parser.add_argument('--kernel_size', default=15, type=int)
-    parser.add_argument('--kernel_depth', default=5, type=int)
+    parser.add_argument('--kernel_depth', default=1, type=int)
     parser.add_argument('--num_kernels', default=32, type=int)
     parser.add_argument('--stride', default=1, type=int)
     parser.add_argument('--max_activation_iter', default=300, type=int)
@@ -39,7 +40,7 @@ if __name__ == "__main__":
     parser.add_argument('--crop_height', type=int, default=400)
     parser.add_argument('--crop_width', type=int, default=400)
     parser.add_argument('--scale_factor', type=int, default=1)
-    parser.add_argument('--clip_depth', type=int, default=5)
+    parser.add_argument('--clip_depth', type=int, default=1)
     parser.add_argument('--frames_to_skip', type=int, default=1)
     
 
@@ -77,11 +78,11 @@ if __name__ == "__main__":
     example_data = next(iter(train_loader))
 
     if args.run_2d:
-        inputs = keras.Input(shape=(image_height, image_width, 5))
+        inputs = keras.Input(shape=(image_height, image_width, clip_depth))
     else:
-        inputs = keras.Input(shape=(5, image_height, image_width, 1))
+        inputs = keras.Input(shape=(clip_depth, image_height, image_width, 1))
         
-    filter_inputs = keras.Input(shape=(5, args.kernel_size, args.kernel_size, 1, args.num_kernels), dtype='float32')
+    filter_inputs = keras.Input(shape=(clip_depth, args.kernel_size, args.kernel_size, 1, args.num_kernels), dtype='float32')
 
     output = SparseCode(batch_size=args.batch_size, image_height=image_height, image_width=image_width, clip_depth=clip_depth, in_channels=1, out_channels=args.num_kernels, kernel_size=args.kernel_size, kernel_depth=args.kernel_depth, stride=args.stride, lam=args.lam, activation_lr=args.activation_lr, max_activation_iter=args.max_activation_iter, run_2d=args.run_2d)(inputs, filter_inputs)
 
@@ -117,8 +118,8 @@
         num_iters = 0
 
         for labels, local_batch, vid_f in tqdm(train_loader):
-            if local_batch.size(0) != args.batch_size:
-                continue
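+            # Add a singleton depth dimension so single-frame (clip_depth=1) batches match the expected input layout.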
+            local_batch = local_batch.unsqueeze(1)
             if args.run_2d:
                 images = local_batch.squeeze(1).permute(0, 2, 3, 1).numpy()
             else:
diff --git a/sparse_coding_torch/pnb/load_data.py b/sparse_coding_torch/pnb/load_data.py
index 3059e04..df1f449 100644
--- a/sparse_coding_torch/pnb/load_data.py
+++ b/sparse_coding_torch/pnb/load_data.py
@@ -66,12 +66,12 @@ def load_pnb_videos(yolo_model, batch_size, input_size, crop_size=None, mode=Non
     ])
     augment_transforms = torchvision.transforms.Compose(
 #     [torchvision.transforms.Resize(input_size[:2]),
-    [torchvision.transforms.RandomRotation(15),
+#     [torchvision.transforms.RandomRotation(15),
 #      torchvision.transforms.RandomHorizontalFlip(),
 #      torchvision.transforms.RandomVerticalFlip(),
-#      torchvision.transforms.ColorJitter(brightness=0.02),     
-#      torchvision.transforms.RandomAdjustSharpness(0, p=0.15),
-#      torchvision.transforms.RandomAffine(degrees=0, translate=(0.01, 0))
+     [torchvision.transforms.ColorJitter(brightness=0.02),     
+     torchvision.transforms.RandomAdjustSharpness(0, p=0.15),
+     torchvision.transforms.RandomAffine(degrees=0, translate=(0.01, 0))
 #      torchvision.transforms.CenterCrop((100, 200))
 #      torchvision.transforms.Resize(input_size[:2])
     ])
diff --git a/sparse_coding_torch/pnb/train_classifier.py b/sparse_coding_torch/pnb/train_classifier.py
index 9acc61d..9d631cc 100644
--- a/sparse_coding_torch/pnb/train_classifier.py
+++ b/sparse_coding_torch/pnb/train_classifier.py
@@ -23,11 +23,11 @@ from sparse_coding_torch.utils import VideoGrayScaler, MinMaxScaler
 import glob
 import cv2
 
-configproto = tf.compat.v1.ConfigProto()
-configproto.gpu_options.polling_inactive_delay_msecs = 5000
-configproto.gpu_options.allow_growth = True
-sess = tf.compat.v1.Session(config=configproto) 
-tf.compat.v1.keras.backend.set_session(sess)
+# configproto = tf.compat.v1.ConfigProto()
+# configproto.gpu_options.polling_inactive_delay_msecs = 5000
+# configproto.gpu_options.allow_growth = True
+# sess = tf.compat.v1.Session(config=configproto) 
+# tf.compat.v1.keras.backend.set_session(sess)
 
 def calculate_pnb_scores(input_videos, labels, yolo_model, sparse_model, recon_model, classifier_model, image_width, image_height, transform):
     all_predictions = []
@@ -251,6 +251,13 @@ if __name__ == "__main__":
 #                 test_videos_out.writelines(test_set)
         else:
             test_loader = None
+            
+#         test_videos = set()
+#         for labels, local_batch, vid_f in test_loader:
+#             test_videos.update(vid_f)
+#         print(test_videos)
+#         print('-------------------------------------------')
+#         continue
         
         if args.checkpoint:
             classifier_model = keras.models.load_model(args.checkpoint)
@@ -394,6 +401,8 @@ if __name__ == "__main__":
         test_videos = set()
         for labels, local_batch, vid_f in test_loader:
             test_videos.update(vid_f)
+            
+        print(test_videos)
 
         test_labels = [vid_f.split('/')[-3] for vid_f in test_videos]
 
@@ -402,6 +411,9 @@ if __name__ == "__main__":
         else:
             y_pred, y_true, fn, fp = calculate_pnb_scores_skipped_frames(test_videos, test_labels, yolo_model, sparse_model, recon_model, classifier_model, args.frames_to_skip, image_width, image_height, transform)
             
+        print(fn)
+        print(fp)
+            
         t2 = time.perf_counter()
 
         print('i_fold={}, time={:.2f}'.format(i_fold, t2-t1))
diff --git a/yolov4/core/yolov4.py b/yolov4/core/yolov4.py
index 247be68..bf63874 100644
--- a/yolov4/core/yolov4.py
+++ b/yolov4/core/yolov4.py
@@ -288,7 +288,6 @@ def decode_trt(conv_output, output_size, NUM_CLASS, STRIDES, ANCHORS, i=0, XYSCA
     return pred_xywh, pred_prob
     # return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)
 
-
 def filter_boxes(box_xywh, scores, score_threshold=0.4, input_shape = tf.constant([416,416])):
     scores_max = tf.math.reduce_max(scores, axis=-1)
 
@@ -313,10 +312,9 @@ def filter_boxes(box_xywh, scores, score_threshold=0.4, input_shape = tf.constan
         box_maxes[..., 0:1],  # y_max
         box_maxes[..., 1:2]  # x_max
     ], axis=-1)
-    # return tf.concat([boxes, pred_conf], axis=-1)
+#     return tf.concat([boxes, pred_conf], axis=-1)
     return (boxes, pred_conf)
 
-
 def compute_loss(pred, conv, label, bboxes, STRIDES, NUM_CLASS, IOU_LOSS_THRESH, i=0):
     conv_shape  = tf.shape(conv)
     batch_size  = conv_shape[0]
diff --git a/yolov4/detect.py b/yolov4/detect.py
index e3f0f22..842f743 100644
--- a/yolov4/detect.py
+++ b/yolov4/detect.py
@@ -1,7 +1,7 @@
 import tensorflow as tf
-physical_devices = tf.config.experimental.list_physical_devices('GPU')
-if len(physical_devices) > 0:
-    tf.config.experimental.set_memory_growth(physical_devices[0], True)
+# physical_devices = tf.config.experimental.list_physical_devices('GPU')
+# if len(physical_devices) > 0:
+#     tf.config.experimental.set_memory_growth(physical_devices[0], True)
 from absl import app, flags, logging
 from absl.flags import FLAGS
 import core.utils as utils
@@ -25,9 +25,9 @@ flags.DEFINE_float('iou', 0.45, 'iou threshold')
 # flags.DEFINE_float('score', 0.1, 'score threshold')
 
 def main(_argv):
-    config = ConfigProto()
-    config.gpu_options.allow_growth = True
-    session = InteractiveSession(config=config)
+#     config = ConfigProto()
+#     config.gpu_options.allow_growth = True
+#     session = InteractiveSession(config=config)
     STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
     input_size = FLAGS.size
     image_path = FLAGS.image
diff --git a/yolov4/get_bounding_boxes.py b/yolov4/get_bounding_boxes.py
index ae1d047..54ec72c 100644
--- a/yolov4/get_bounding_boxes.py
+++ b/yolov4/get_bounding_boxes.py
@@ -1,7 +1,7 @@
 from absl import app, flags, logging
 from absl.flags import FLAGS
 import tensorflow as tf
-physical_devices = tf.config.experimental.list_physical_devices('GPU')
+# physical_devices = tf.config.experimental.list_physical_devices('GPU')
 import yolov4.core.utils as utils
 from yolov4.core.yolov4 import filter_boxes
 from tensorflow.python.saved_model import tag_constants
-- 
GitLab