diff --git a/Robot_Development/catkin_ws/src/lane_demo/src/lane_demo.cpp b/Robot_Development/catkin_ws/src/lane_demo/src/lane_demo.cpp
index bcca1db91180dc2733204ae43906e9ffd552dec7..851c0af6ae276bd9fa66c9a4c7418febbd2b216c 100644
--- a/Robot_Development/catkin_ws/src/lane_demo/src/lane_demo.cpp
+++ b/Robot_Development/catkin_ws/src/lane_demo/src/lane_demo.cpp
@@ -57,7 +57,7 @@ class SenseAndAvoid
     double elapsed = difftime(now, last_detection_time);
     
     STATE state;
-	float linear_speed = 0.5;
+	float linear_speed = 0.7;
 };
 SenseAndAvoid::SenseAndAvoid()
 {
@@ -74,10 +74,10 @@ SenseAndAvoid::SenseAndAvoid()
   left_count = 0;
   right_count = 0;
   cur_error = 0;
-  Kp = 0.003;
+  Kp = 0.01;
   last_detection_time = time(NULL);
   Kv = 0;
-  Kd = 0.002;
+  Kd = 0.035;
 }
 
 void SenseAndAvoid::Lanes(const std_msgs::String::ConstPtr& msg)
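
The gain retune above makes the steering loop effectively a PD controller: Kp rises from 0.003 to 0.01, Kd from 0.002 to 0.035, and Kv (used as the integral gain) stays at 0. As a rough sanity check, assuming lane_distance is a pixel offset: a steady 50 px error contributes 0.01 * 50 = 0.5 rad/s of proportional correction, and a 10 px change between frames adds 0.035 * 10 = 0.35 rad/s of derivative damping.
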
@@ -107,13 +107,8 @@ void SenseAndAvoid::Lane_Distance(const std_msgs::Float64::ConstPtr& msg)
 void SenseAndAvoid::Object_Detection(const std_msgs::String::ConstPtr& msg)
 {
 	last_detection_time = time(NULL);
-
-
 	std::string object = msg->data;
-	//printf("c_str objcet %s\n",object.c_str());
 	thanos = Detection(object.c_str());
-	//ROS_INFO("Detected Object : %s",thanos.to_string().c_str());
-  	//ROS_INFO("ELAPSED - SECONDS %lf", elapsed);
 }
 void SenseAndAvoid::leftEncoderCallback(const std_msgs::UInt64::ConstPtr& msg)
 {
@@ -170,7 +165,7 @@ Detection Variables
 		vel_msg.linear.x = 0;
 		vel_msg.angular.z = 0;
   }
-  else if(thanos._name == "stop" && thanos._prob > 80)
+  else if(thanos._name == "stop" && thanos._prob > 80 && thanos._h > 175)
   {
 	  	ROS_INFO("STOP SIGN FOUND");
 		vel_msg.linear.x = 0;
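
The new thanos._h > 175 term gates the stop on the detection's bounding-box height, a cheap proxy for proximity: the sign must fill enough vertical pixels before the robot halts, so distant signs no longer trigger a stop. Under a pinhole-camera assumption the same idea can be phrased as a distance estimate (the focal length and physical sign height below are illustrative, not from the source):

    # Hedged sketch: bounding-box height as a range proxy (pinhole model).
    FOCAL_PX = 600.0       # assumed focal length in pixels
    SIGN_HEIGHT_M = 0.75   # assumed real-world sign height

    def approx_distance_m(box_height_px):
        # distance ~ f * H_real / h_pixels; 175 px maps to ~2.6 m here
        return FOCAL_PX * SIGN_HEIGHT_M / box_height_px
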
@@ -185,16 +180,24 @@ Detection Variables
   else if(thanos._name == "speedLimit25" && thanos._prob > 60)
   {
 		ROS_INFO("SPEED LIMIT 25");
-		linear_speed = 0.3;
+		linear_speed = 0.4;
+		cur_error = lane_distance;
+		integral_error += cur_error;
+		derivative_error = cur_error - prev_error;
 		vel_msg.linear.x = linear_speed;
-		vel_msg.angular.z = 0;
+		vel_msg.angular.z = (Kp * cur_error + Kv * integral_error + Kd * derivative_error);
+		prev_error = cur_error;
   }
   else if(thanos._name == "speedLimit35" && thanos._prob > 60)
   {
 		ROS_INFO("SPEED LIMIT 35");
 		linear_speed = 0.5;
+		cur_error = lane_distance;
+		integral_error += cur_error;
+		derivative_error = cur_error - prev_error;
 		vel_msg.linear.x = linear_speed;
-		vel_msg.angular.z = 0;
+		vel_msg.angular.z = (Kp * cur_error + Kv * integral_error + Kd * derivative_error);
+		prev_error = cur_error;
   }
   else if(state == STOP)
   {
@@ -202,42 +205,19 @@ Detection Variables
 	vel_msg.linear.x = 0;
 	vel_msg.angular.z = 0;
   }
-  else if(state == TURN_LEFT)
-  {
-	ROS_INFO("TURNING LEFT");
-	vel_msg.linear.x = linear_speed;
-	vel_msg.angular.z = -ANGULAR_SPEED;
-  }
-  else if(state == TURN_RIGHT)
-  {
-	ROS_INFO("TURNING_RIGHT");
-	vel_msg.linear.x = linear_speed;
-	vel_msg.angular.z = ANGULAR_SPEED;
-  }
   else
   {
-	cur_error = lane_distance;
-	ROS_INFO_STREAM(cur_error);
-	if(cur_error > 20 || cur_error < (-20))
-	{
+		cur_error = lane_distance;
+		ROS_INFO_STREAM(cur_error);
 		ROS_INFO("PID Control");
 		integral_error += cur_error;
 		derivative_error = cur_error - prev_error;
 		vel_msg.linear.x = linear_speed;
-		vel_msg.angular.z = -(Kp * cur_error + Kv * integral_error + Kd * derivative_error);
+		vel_msg.angular.z = (Kp * cur_error + Kv * integral_error + Kd * derivative_error);
 		prev_error = cur_error;
-	}
-	else
-	{
-		ROS_INFO("GOING STRAIGHT");
-		vel_msg.linear.x = linear_speed;
-		vel_msg.angular.z = 0;
-	}
   }
   vel_pub.publish(vel_msg);
 }
-
-
 int main(int argc, char **argv)
 {
   ros::init(argc, argv, "lane_demo");
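
Net effect of the lane_demo.cpp changes: the bang-bang TURN_LEFT/TURN_RIGHT states and the ±20 px dead band are removed, PID steering now runs on every frame (including while a speed-limit detection is active), and the sign of the correction is flipped. A minimal Python transcription of the resulting control step, using the gains from this diff (a sketch, not the C++ source; Kv serves as the integral gain):

    Kp, Kv, Kd = 0.01, 0.0, 0.035
    linear_speed = 0.7

    integral_error = 0.0
    prev_error = 0.0

    def control_step(lane_distance):
        """One pass of the steering law: returns (linear.x, angular.z)."""
        global integral_error, prev_error
        cur_error = lane_distance            # px offset published by the detector
        integral_error += cur_error          # inert while Kv == 0
        derivative_error = cur_error - prev_error
        prev_error = cur_error
        return linear_speed, Kp * cur_error + Kv * integral_error + Kd * derivative_error
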
diff --git a/Robot_Development/catkin_ws/src/lane_detection/src/detect.py b/Robot_Development/catkin_ws/src/lane_detection/src/detect.py
index fb0ba974ae24b23614495ef9757799c8991af782..e1ff5a8f2c111cf4adf95e76bf1e051f500623dd 100755
--- a/Robot_Development/catkin_ws/src/lane_detection/src/detect.py
+++ b/Robot_Development/catkin_ws/src/lane_detection/src/detect.py
@@ -19,44 +19,26 @@ width = 325
 # Converts picture to grayscale and applies filter to picture
 # params
 # pic : a numpy array of pixel values to represent a picture
-def formatImg(pic):
-    # Convert picture to gray scale
-    gray = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY)
-
-    # Apply filter to image
-    img_filter = cv2.GaussianBlur(gray, (5, 5), 0)
-    return img_filter
 	
 def thresh(pic):
     grayscaled = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY) #grayscale
-    retval, th = cv2.threshold(grayscaled, 80, 255, cv2.THRESH_BINARY) #filter out unncessary edges
+    retval, th = cv2.threshold(grayscaled, 60, 255, cv2.THRESH_BINARY) #filter out unnecessary edges
     img_filter = cv2.GaussianBlur(th, (5, 5), 0) #Make the image more cohesive
     img_edge = cv2.Canny(img_filter, 50, 150) #find the edges
     return img_edge
 
-# pre-processes and performs edge detection on an image
-# params
-# pic: a numpy array of pixel values for an image in gray scale
-def detect_edge(pic):
-    # Perform canny edge detection
-    img_edge = cv2.Canny(pic, 50, 150)
-
-    # return new edge image
-    return img_edge
-
 # Define the region in which the lanes will appear in the camera's view
 # params
 # pic: original image to apply the pre-set region of interest to
 def ROI_real(pic):
     height = pic.shape[0]
     width = pic.shape[1]
-    triangle = np.array([[(0, height), (135, 250), (515, 250), (width, height)]], dtype=np.int32) #shape of a trapazoid
+    triangle = np.array([[(0, height), (0, 250), (width, 250), (width, height)]], dtype=np.int32) #rectangle covering the lower part of the frame
     mask = np.zeros_like(pic)
     cv2.fillPoly(mask, triangle, 255)
     roi = cv2.bitwise_and(pic, mask)
     return roi
 
-
 # Get all possible HoughLines and return them
 # params
 # edge_pic: the image with the edge detection performed
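
Two preprocessing changes land in this hunk: the binary threshold drops from 80 to 60, keeping darker lane markings, and ROI_real now masks to a full-width rectangle below y = 250 instead of the old trapezoid, so lanes near the frame edges survive the crop. Gathered into one place, the surviving pipeline looks like this (a sketch using the same OpenCV calls as the diff):

    import cv2
    import numpy as np

    def preprocess(frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        _, binary = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY)  # was 80
        blurred = cv2.GaussianBlur(binary, (5, 5), 0)
        edges = cv2.Canny(blurred, 50, 150)
        h, w = edges.shape[:2]
        mask = np.zeros_like(edges)
        region = np.array([[(0, h), (0, 250), (w, 250), (w, h)]], dtype=np.int32)
        cv2.fillPoly(mask, region, 255)
        return cv2.bitwise_and(edges, mask)
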
@@ -126,18 +108,18 @@ def find_poly_lane(pic, lines):
         slope = parameters[0]
         intercept = parameters[1]
 
-        if (slope < .5 and slope > -.5):
+        if (slope < .75 and slope > -.75):
             # print("Line ignored");
             x = 1
 
-        elif (slope < 0):
+        elif (slope < 0 and (x1 < 300 and x2 < 300)):
             # print("left insert")
             left_lines_points[0].append(x1)
             left_lines_points[0].append(x2)
             left_lines_points[1].append(y1)
             left_lines_points[1].append(y2)
 
-        else:
+        elif (x1 > 300 and x2 > 300):
             # print("right insert")
             right_lines_points[0].append(x1)
             right_lines_points[0].append(x2)
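
The classifier in find_poly_lane is tightened twice: lines with |slope| < 0.75 (previously 0.5) are discarded as near-horizontal noise, and a segment is only filed as left or right if both endpoints sit on the matching side of x = 300, so a line crossing the image center is now dropped instead of landing in the right-lane bucket by default. In isolation the decision reads (sketch):

    def classify_segment(x1, y1, x2, y2, slope):
        # Mirrors the updated branches; returns which point list gets the segment.
        if -0.75 < slope < 0.75:
            return None              # near-horizontal: ignored
        if slope < 0 and x1 < 300 and x2 < 300:
            return "left"
        if x1 > 300 and x2 > 300:
            return "right"
        return None                  # straddles the center line: dropped
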
@@ -217,10 +199,10 @@ def detectDeparture(left, car, right):
 
 
 def detectDepartureNew(midPoints):
-    midx = 300
+    midx = 325
     midy = 350
     for x in range(150):
-        if (myround(midPoints[1][x]) == 300):
+        if (myround(midPoints[1][x]) == midy):
             # print(midPoints[0][x] - midx)
             return midPoints[0][x] - midx
 
@@ -240,75 +222,54 @@ def lanes_detected(lanes):
     pub.publish(lanes)
     rate.sleep()
 
-
-# video = cv2.VideoCapture("test2.mp4")
-# video = cv2.VideoCapture("highway.mp4")
-
+def applyLanes(frame, right, left, middle):
+    if left[0] != []:
+        Lx1 = int(left[0][0])
+        Lx2 = int(left[0][149])
+        Ly1 = int(left[1][0])
+        Ly2 = int(left[1][149])
+        cv2.line(frame, (Lx1, Ly1), (Lx2, Ly2), (255, 0, 0), 3)
+
+    if right[0] != []:
+        Rx1 = int(right[0][0])
+        Rx2 = int(right[0][149])
+        Ry1 = int(right[1][0])
+        Ry2 = int(right[1][149])
+        cv2.line(frame, (Rx1, Ry1), (Rx2, Ry2), (255, 0, 0), 3)
+
+    if middle[0] != []:
+        Mx1 = int(middle[0][0])
+        Mx2 = int(middle[0][149])
+        My1 = int(middle[1][0])
+        My2 = int(middle[1][149])
+        cv2.line(frame, (Mx1, My1), (Mx2, My2), (255, 0, 0), 3)
 
 video = WebcamVideoStream(src=1).start()
-plt.ion()
-
+#frame_width = 640
+#frame_height = 480
+#out = cv2.VideoWriter('outpy.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))
 while not rospy.is_shutdown():
     frame = video.read()
-    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-    # frame = cv2.imread("../frame.jpeg")
-    #frame_edge = detect_edge(frame)
-
-    test = thresh(frame)
-    #new_img = formatImg(frame)
-
-    #wEdges = detect_edge(new_img)
-
-    #cropped = ROI_real(wEdges)
-    cropped_2 = ROI_real(test)
-
-    #lines = getLines(cropped)
-    lines_2 = getLines(cropped_2)
-    if lines_2 is None:
-        x = 0
-
+    filterImg = thresh(frame)
+    cropped = ROI_real(filterImg)
+    lines = getLines(cropped)
+    if lines is None:
+        print("Failed to find any lines/lanes")
     else:
-        #Rpoints, Lpoints = find_poly_lane(new_img, lines)
-        Rpoints_2, Lpoints_2 = find_poly_lane(test, lines_2)
-        #Mpoints = find_middle(Lpoints, Rpoints)
-        Mpoints_2 = find_middle(Lpoints_2,Rpoints_2)
-
-        # (type(Rpoints[0][0]))
-        plt.cla()
-        plt.clf()
-
-        #plt.scatter(Rpoints[0], Rpoints[1])
-        #plt.scatter(Lpoints[0], Lpoints[1])
-        plt.scatter(Rpoints_2[0], Rpoints_2[1])
-        plt.scatter(Lpoints_2[0], Lpoints_2[1])
-        #lanes = detectDeparture(Lpoints, 400, Rpoints)
-        lanes_2 = detectDeparture(Lpoints_2, 400, Rpoints_2)
-        #lanes_detected(lanes)
-        lanes_detected(lanes_2)
-        if (Mpoints_2 != 0):
-            #plt.scatter(Mpoints[0], Mpoints[1])
-            plt.scatter(Mpoints_2[0],Mpoints_2[1])
-            #lane_distance = detectDepartureNew(Mpoints)
-            lane_distance_2 = detectDepartureNew(Mpoints_2)
-            lane_status(lane_distance_2)
-
+        Rpoints, Lpoints = find_poly_lane(filterImg, lines)
+        Mpoints = find_middle(Lpoints,Rpoints)
+        lanes = detectDeparture(Lpoints, 350, Rpoints)
+        lanes_detected(lanes)
+        if (Mpoints != 0):
+            lane_distance = detectDepartureNew(Mpoints)
+            lane_status(lane_distance)
+            #applyLanes(frame, Rpoints, Lpoints, Mpoints)
         else:
             print("No midpoint calculated")
-        plt.scatter(300, 350)
-
-        plt.imshow(frame, zorder=0)
-
-        plt.pause(.001)
-
-    #lane = applyLines(frame, lines)
-    #cv2.imshow("thresh",test)
-    #cv2.imshow("cropped_2",cropped_2)
-    # cv2.imshow("edges", wEdges)
-    # cv2.imshow("cropped", cropped)
-    # cv2.imshow("frame", lane)
-
-    #key = cv2.waitKey(1)
-    #if key == 27:
-		#break
-    #video.release()
-    #cv2.destroyAllWindows()
+            if(lanes == "Left lane does not exist"):
+                lane_distance = -100
+                lane_status(lane_distance)
+            elif(lanes == "Right lane does not exist"):
+                lane_distance = 100
+                lane_status(lane_distance)
+    #out.write(frame)
\ No newline at end of file
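
The rewritten main loop drops the matplotlib display entirely, so detect.py can run headless on the robot, and when no midpoint can be computed it now publishes a saturated ±100 px offset so the PID node still receives an error signal for the side where a lane was lost instead of coasting. The new applyLanes helper stays commented out in the loop; a hypothetical debug use would be:

    # Illustrative only; the diff leaves this call disabled on the robot.
    applyLanes(frame, Rpoints, Lpoints, Mpoints)  # draw the three fitted lines
    cv2.imshow("lanes", frame)
    cv2.waitKey(1)
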
diff --git a/Robot_Development/catkin_ws/src/lane_detection/src/detect_plot.py b/Robot_Development/catkin_ws/src/lane_detection/src/detect_plot.py
index 25c74f6047a0e601bb283600f6ad5fa6174be61b..5dcdd7055765474fbe569649d4307dbecb028bc3 100755
--- a/Robot_Development/catkin_ws/src/lane_detection/src/detect_plot.py
+++ b/Robot_Development/catkin_ws/src/lane_detection/src/detect_plot.py
@@ -16,34 +16,15 @@ rate = rospy.Rate(1000)  # 1000hz
 width = 325
 
 
-# Converts picture to grayscale and applies filter to picture
-# params
-# pic : a numpy array of pixel values to represent a picture
-def formatImg(pic):
-    # Convert picture to gray scale
-    gray = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY)
-
-    # Apply filter to image
-    img_filter = cv2.GaussianBlur(gray, (5, 5), 0)
-    return img_filter
 	
 def thresh(pic):
     grayscaled = cv2.cvtColor(pic, cv2.COLOR_BGR2GRAY) #grayscale
-    retval, th = cv2.threshold(grayscaled, 80, 255, cv2.THRESH_BINARY) #filter out unncessary edges
+    retval, th = cv2.threshold(grayscaled, 60, 255, cv2.THRESH_BINARY) #filter out unnecessary edges
+    cv2.imshow("th",th)
     img_filter = cv2.GaussianBlur(th, (5, 5), 0) #Make the image more cohesive
     img_edge = cv2.Canny(img_filter, 50, 150) #find the edges
     return img_edge
-
-# pre-processes and performs edge detection on an image
-# params
-# pic: a numpy array of pixel values for an image in gray scale
-def detect_edge(pic):
-    # Perform canny edge detection
-    img_edge = cv2.Canny(pic, 50, 150)
-
-    # return new edge image
-    return img_edge
-
+
 # Define the region in which the lanes will appear in the camera's view
 # params
 # pic: original image to apply the pre-set region of interest to
@@ -126,18 +107,18 @@ def find_poly_lane(pic, lines):
         slope = parameters[0]
         intercept = parameters[1]
 
-        if (slope < .5 and slope > -.5):
+        if (slope < .75 and slope > -.75):
             # print("Line ignored");
             x = 1
 
-        elif (slope < 0):
+        elif (slope < 0 and (x1 < 300 and x2 < 300)):
             # print("left insert")
             left_lines_points[0].append(x1)
             left_lines_points[0].append(x2)
             left_lines_points[1].append(y1)
             left_lines_points[1].append(y2)
 
-        else:
+        elif (x1 > 300 and x2 > 300):
             # print("right insert")
             right_lines_points[0].append(x1)
             right_lines_points[0].append(x2)
@@ -217,10 +198,10 @@ def detectDeparture(left, car, right):
 
 
 def detectDepartureNew(midPoints):
-    midx = 300
+    midx = 325
     midy = 350
     for x in range(150):
-        if (myround(midPoints[1][x]) == 300):
+        if (myround(midPoints[1][x]) == midy):
             # print(midPoints[0][x] - midx)
             return midPoints[0][x] - midx
 
@@ -228,86 +209,46 @@ def detectDepartureNew(midPoints):
 def myround(x):
     return int(5 * round(float(x) / 5))
 
-
 def lane_status(lane_distance):
     rospy.loginfo(lane_distance)
     pub1.publish(lane_distance)
     rate.sleep()
 
-
 def lanes_detected(lanes):
     rospy.loginfo(lanes)
     pub.publish(lanes)
     rate.sleep()
 
-
-# video = cv2.VideoCapture("test2.mp4")
-# video = cv2.VideoCapture("highway.mp4")
-
-
 video = WebcamVideoStream(src=1).start()
 plt.ion()
 
 while not rospy.is_shutdown():
     frame = video.read()
-    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-    # frame = cv2.imread("../frame.jpeg")
-    #frame_edge = detect_edge(frame)
-
-    test = thresh(frame)
-    #new_img = formatImg(frame)
-
-    #wEdges = detect_edge(new_img)
-
-    #cropped = ROI_real(wEdges)
-    cropped_2 = ROI_real(test)
-
-    #lines = getLines(cropped)
-    lines_2 = getLines(cropped_2)
-    if lines_2 is None:
+    filterImg = thresh(frame)
+    cropped = ROI_real(filterImg)
+    lines = getLines(cropped)
+    if lines is None:
         x = 0
-
     else:
-        #Rpoints, Lpoints = find_poly_lane(new_img, lines)
-        Rpoints_2, Lpoints_2 = find_poly_lane(test, lines_2)
-        #Mpoints = find_middle(Lpoints, Rpoints)
-        Mpoints_2 = find_middle(Lpoints_2,Rpoints_2)
-
-        # (type(Rpoints[0][0]))
         plt.cla()
         plt.clf()
-
-        #plt.scatter(Rpoints[0], Rpoints[1])
-        #plt.scatter(Lpoints[0], Lpoints[1])
-        plt.scatter(Rpoints_2[0], Rpoints_2[1])
-        plt.scatter(Lpoints_2[0], Lpoints_2[1])
-        #lanes = detectDeparture(Lpoints, 400, Rpoints)
-        lanes_2 = detectDeparture(Lpoints_2, 400, Rpoints_2)
-        #lanes_detected(lanes)
-        lanes_detected(lanes_2)
-        if (Mpoints_2 != 0):
-            #plt.scatter(Mpoints[0], Mpoints[1])
-            plt.scatter(Mpoints_2[0],Mpoints_2[1])
-            #lane_distance = detectDepartureNew(Mpoints)
-            lane_distance_2 = detectDepartureNew(Mpoints_2)
-            lane_status(lane_distance_2)
-
+        Rpoints, Lpoints = find_poly_lane(filterImg, lines)
+        Mpoints = find_middle(Lpoints,Rpoints)
+        plt.scatter(Rpoints[0], Rpoints[1])
+        plt.scatter(Lpoints[0], Lpoints[1])
+        lanes = detectDeparture(Lpoints, 400, Rpoints)
+        lanes_detected(lanes)
+        if (Mpoints != 0):
+            plt.scatter(Mpoints[0],Mpoints[1])
+            lane_distance = detectDepartureNew(Mpoints)
+            lane_status(lane_distance)
         else:
             print("No midpoint calculated")
-		plt.scatter(300, 350)
+        plt.scatter(325, 350)
         plt.imshow(frame, zorder=0)
         plt.pause(.001)
-
-	
-	#lane = applyLines(frame, lines)
-    cv2.imshow("thresh",test)
-    cv2.imshow("cropped_2",cropped_2)
-    # cv2.imshow("edges", wEdges)
-    # cv2.imshow("cropped", cropped)
-    # cv2.imshow("frame", lane)
-
+    cv2.imshow("thresh",filterImg)
+    cv2.imshow("cropped",cropped)
     key = cv2.waitKey(1)
     if key == 27:
-		break
-    #video.release()
-    #cv2.destroyAllWindows()
+        break
\ No newline at end of file
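
detect_plot.py receives the same threshold, slope, and midpoint changes as detect.py but keeps the matplotlib scatter view and the cv2.imshow windows, making it the on-desk debugging twin of the headless runtime node. One loose end: the commented-out cleanup was deleted rather than restored, so the ESC branch exits without releasing resources; a hedged fix (WebcamVideoStream from imutils does expose stop()):

    # Sketch: run after the while loop ends instead of breaking bare.
    video.stop()
    cv2.destroyAllWindows()
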
diff --git a/Robot_Development/catkin_ws/src/lane_detection/src/outpy.avi b/Robot_Development/catkin_ws/src/lane_detection/src/outpy.avi
new file mode 100644
index 0000000000000000000000000000000000000000..498947e66ba44e37d0091688bfaf422f6957192c
Binary files /dev/null and b/Robot_Development/catkin_ws/src/lane_detection/src/outpy.avi differ