Compare commits

...

1 Commits

Author SHA1 Message Date
Thomas BONNIER 83b619aa57 first commit 2025-04-28 15:01:01 +02:00
469 changed files with 1957 additions and 0 deletions

173
Final.py Normal file
View File

@ -0,0 +1,173 @@
from ultralytics import YOLO
import cv2
import numpy as np
def load_camera_intrinsics(filename):
    """Read the camera matrix and distortion coefficients from an OpenCV YAML file.

    Returns (camera_matrix, dist_coeffs) as numpy matrices, or (None, None)
    when the file cannot be opened.
    """
    storage = cv2.FileStorage(filename, cv2.FILE_STORAGE_READ)
    if not storage.isOpened():
        print("Failed to open camera intrinsics file:", filename)
        return None, None
    try:
        matrix = storage.getNode("cameraMatrix").mat()
        distortion = storage.getNode("distCoeffs").mat()
    finally:
        # Release the handle even if a node lookup raises.
        storage.release()
    return matrix, distortion
def preprocess_frame(frame):
    """Prepare a BGR frame for marker detection.

    Pipeline: grayscale -> CLAHE (local contrast boost) -> bilateral filter
    (edge-preserving denoise) -> morphological opening (remove small specks).
    Returns the processed single-channel image.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    equalized = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)).apply(gray)
    denoised = cv2.bilateralFilter(equalized, d=9, sigmaColor=75, sigmaSpace=75)
    open_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    return cv2.morphologyEx(denoised, cv2.MORPH_OPEN, open_kernel)
def smooth_angle(previous, current, alpha=0.5):
    """Exponentially smooth an angle reading to damp frame-to-frame jitter.

    alpha weights the new sample; (1 - alpha) keeps the running value.
    Written in incremental form: previous + alpha * (current - previous).
    """
    return previous + alpha * (current - previous)
def main():
    """Run the combined YOLO can detector + ArUco marker tracker on a live feed.

    Draws YOLO detections in blue, ArUco marker boxes in red, smoothed marker
    angles as green text, and (when calibration data is available) the marker
    pose axes.  Press 'q' to quit.
    """
    # Load camera intrinsics from YAML (if available); without them we skip
    # undistortion and pose estimation but still detect markers.
    camera_matrix, dist_coeffs = load_camera_intrinsics("camera_intrinsics.yaml")
    calibration_available = camera_matrix is not None and dist_coeffs is not None

    # Open webcam (using camera index 1 as requested).
    cap = cv2.VideoCapture(1)
    if not cap.isOpened():
        print("Cannot open camera")
        return

    # YOLO model trained to detect cans.
    yolo_model = YOLO("runs/detect/train4/weights/best.pt")

    # Select the ArUco API flavour for the installed OpenCV version.
    # BUGFIX: every path must use the same predefined dictionary
    # (DICT_4X4_50).  The previous fallback built a custom 4-marker
    # dictionary via Dictionary_create(4, 4), which does not match the
    # markers the new-API path detects.
    major_version = int(cv2.__version__.split('.')[0])
    if major_version >= 4:
        try:
            # OpenCV 4.7+ object-oriented ArUco API.
            aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
            parameters = cv2.aruco.DetectorParameters()
            detector = cv2.aruco.ArucoDetector(aruco_dict, parameters)
            detect_method = "new"
            print("Using newest ArUco API (OpenCV 4.7+)")
        except Exception:
            # Older 4.x function-style API; same predefined dictionary.
            aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_50)
            try:
                parameters = cv2.aruco.DetectorParameters_create()
            except Exception:
                parameters = None
            detector = None
            detect_method = "old"
            print("Using older ArUco API (OpenCV 4.x)")
    else:
        # Legacy 3.x API.
        aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_50)
        try:
            parameters = cv2.aruco.DetectorParameters_create()
        except Exception:
            parameters = None
        detector = None
        detect_method = "old"
        print("Using legacy ArUco API (OpenCV 3.x)")

    # Marker side length in meters (adjust to your actual marker size).
    MARKER_LENGTH = 0.05
    # marker id -> last smoothed angle, for jitter damping across frames.
    angle_history = {}

    while True:
        ret, frame = cap.read()
        if not ret:
            print("Failed to grab frame")
            break

        # Undistort only when calibration data was loaded.
        if calibration_available:
            frame = cv2.undistort(frame, camera_matrix, dist_coeffs, None, camera_matrix)

        # Run YOLO detection on the frame.
        yolo_results = yolo_model.predict(source=frame, show=False)
        yolo_boxes = []
        if len(yolo_results) > 0:
            boxes = yolo_results[0].boxes
            if boxes is not None and len(boxes) > 0:
                yolo_boxes = boxes.xyxy.cpu().numpy()  # shape: (N, 4)

        # ArUco detection runs on the contrast-enhanced frame.
        processed = preprocess_frame(frame)
        if detect_method == "new" and detector is not None:
            corners, ids, rejected = detector.detectMarkers(processed)
        else:
            corners, ids, rejected = cv2.aruco.detectMarkers(processed, aruco_dict, parameters=parameters)

        # Draw everything on a copy so the raw frame stays clean.
        display_frame = frame.copy()

        # YOLO detection boxes (blue).
        for box in yolo_boxes:
            x1, y1, x2, y2 = box.astype(int)
            cv2.rectangle(display_frame, (x1, y1), (x2, y2), (255, 0, 0), 2)

        # Overlay ArUco marker information, if any markers were found.
        if ids is not None:
            cv2.aruco.drawDetectedMarkers(display_frame, corners, ids)
            for i, marker_id in enumerate(ids):
                pts = corners[i].reshape((4, 2))
                # Orientation of the marker's top edge (corner 0 -> corner 1).
                dx = pts[1][0] - pts[0][0]
                dy = pts[1][1] - pts[0][1]
                current_angle = np.degrees(np.arctan2(dy, dx))
                mid = marker_id[0]
                if mid in angle_history:
                    smoothed_angle = smooth_angle(angle_history[mid], current_angle)
                else:
                    smoothed_angle = current_angle
                angle_history[mid] = smoothed_angle

                # Label the marker near its center.
                center = np.mean(pts, axis=0).astype(int)
                text = f"ID:{marker_id[0]} Angle:{smoothed_angle:.1f}"
                cv2.putText(display_frame, text, (center[0]-50, center[1]),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

                # Axis-aligned bounding box of the marker, drawn in red to
                # indicate a high-accuracy detection.
                x_min = int(np.min(pts[:, 0]))
                y_min = int(np.min(pts[:, 1]))
                x_max = int(np.max(pts[:, 0]))
                y_max = int(np.max(pts[:, 1]))
                cv2.rectangle(display_frame, (x_min, y_min), (x_max, y_max), (0, 0, 255), 2)
                # (Optional) The YOLO box could be suppressed in this region
                # when a marker is found; for now we simply overlay both.

                # Estimate and draw pose when calibration data is available.
                if calibration_available:
                    try:
                        rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
                            corners[i], MARKER_LENGTH, camera_matrix, dist_coeffs
                        )
                        cv2.drawFrameAxes(display_frame, camera_matrix, dist_coeffs,
                                          rvecs[0], tvecs[0], MARKER_LENGTH * 0.5)
                    except Exception as e:
                        print("Pose estimation error:", e)

        # Display the combined detection result.
        cv2.imshow("YOLO + ArUco Detection", display_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()

16
camera_intrinsics.yaml Normal file
View File

@ -0,0 +1,16 @@
%YAML:1.0
---
cameraMatrix: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 335.25610245664052, 0., 295.23529284876372, 0.,
332.38648153369775, 211.10855895422605, 0., 0., 1. ]
distCoeffs: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ -0.44666929663304417, 0.20235297061909502,
0.018370253794944981, 0.00057076740044052212,
-0.042389499486614066 ]
MeanReprojectionError: 0.36762902799789193

102
can.py Normal file
View File

@ -0,0 +1,102 @@
import cv2
import numpy as np
import yaml
def load_camera_intrinsics(yaml_file):
    """Load the camera matrix and distortion coefficients from a YAML file.

    OpenCV writes calibration files that plain PyYAML cannot parse directly:
    a '%YAML:1.0' directive line and '!!opencv-matrix' type tags (both are
    present in the companion camera_intrinsics.yaml).  This function strips
    both before calling yaml.safe_load.  The file is expected to contain
    keys 'cameraMatrix' and 'distCoeffs' in the OpenCV matrix layout
    (rows / cols / data).

    Returns (camera_matrix, dist_coeffs) as numpy arrays.
    """
    with open(yaml_file, 'r') as f:
        content = f.read()
    # Remove YAML directives (lines starting with '%'): PyYAML rejects the
    # non-standard '%YAML:1.0' directive OpenCV emits.
    lines = content.splitlines()
    filtered_lines = [line for line in lines if not line.lstrip().startswith('%')]
    # BUGFIX: also remove the custom '!!opencv-matrix' tag — yaml.safe_load
    # has no constructor registered for it and raises ConstructorError.
    filtered_content = "\n".join(filtered_lines).replace("!!opencv-matrix", "")
    data = yaml.safe_load(filtered_content)
    camera_matrix = np.array(data["cameraMatrix"]["data"]).reshape(
        data["cameraMatrix"]["rows"], data["cameraMatrix"]["cols"]
    )
    dist_coeffs = np.array(data["distCoeffs"]["data"])
    return camera_matrix, dist_coeffs
def detect_can_shape(frame):
    """Draw can-like shapes found in *frame* (modified in place and returned).

    Candidate contours come from Canny edges.  Contours with at least five
    points get an ellipse fit; near-circular ellipses (axis ratio > 0.7) are
    drawn in green as can-like, elongated ones as blue contours.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 50, 150)
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        # Small contours are unlikely to represent a can.
        if cv2.contourArea(contour) < 500:
            continue
        # cv2.fitEllipse requires at least 5 points.
        if len(contour) < 5:
            continue
        fitted = cv2.fitEllipse(contour)
        (_, _), (axis_a, axis_b), _ = fitted
        if axis_b == 0:
            continue
        # Ratio of minor to major axis, in (0, 1].
        ratio = axis_a / axis_b if axis_a < axis_b else axis_b / axis_a
        if ratio > 0.7:
            cv2.ellipse(frame, fitted, (0, 255, 0), 2)
        else:
            cv2.drawContours(frame, [contour], 0, (255, 0, 0), 2)
    return frame
def main():
    """Capture frames from the default camera, undistort them, and display
    can-shape detections.  Press 'q' to quit.
    """
    # Load camera intrinsics from the YAML file (camera_intrinsics.yaml).
    camera_matrix, dist_coeffs = load_camera_intrinsics("camera_intrinsics.yaml")

    # Open the default camera (index 0).
    # BUGFIX: the original comment and error message said "camera 1" while
    # the code opens index 0; message and comment now match the code.
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Error: Could not open camera 0.")
        return

    # Optional: ArUco detection code (if needed)
    # aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_50)
    # parameters = cv2.aruco.DetectorParameters_create()

    while True:
        ret, frame = cap.read()
        if not ret:
            print("Failed to grab a frame")
            break

        # Undistort the captured frame using the calibration data.
        undistorted = cv2.undistort(frame, camera_matrix, dist_coeffs)

        # Optional: ArUco detection on the undistorted frame
        # gray = cv2.cvtColor(undistorted, cv2.COLOR_BGR2GRAY)
        # corners, ids, rejected = cv2.aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
        # if ids is not None:
        #     cv2.aruco.drawDetectedMarkers(undistorted, corners, ids)

        # Detect can shapes using contour extraction and ellipse fitting.
        detected = detect_can_shape(undistorted)
        cv2.imshow("Can Shape Detection", detected)

        # Exit the loop on pressing 'q'.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()

189
create_dataset Normal file
View File

@ -0,0 +1,189 @@
import cv2
import os
import numpy as np
# Global state shared between the main capture loop and the mouse callback.
manual_boxes = []  # Boxes drawn manually by the user (rendered green)
auto_boxes = []  # Boxes found by auto-detection (rendered red)
frame = None  # Current camera frame (rewritten on every loop iteration)
def draw_boxes(img, manual, auto):
    """Render manual boxes in green and auto-detected boxes in red on *img*.

    The image is modified in place and also returned for call-chaining.
    Boxes are (x1, y1, x2, y2) pixel tuples.
    """
    green = (0, 255, 0)
    red = (0, 0, 255)
    for left, top, right, bottom in manual:
        cv2.rectangle(img, (left, top), (right, bottom), green, 2)
    for left, top, right, bottom in auto:
        cv2.rectangle(img, (left, top), (right, bottom), red, 2)
    return img
def draw_rectangle(event, x, y, flags, param):
    """
    Mouse callback for manual box drawing (registered via cv2.setMouseCallback).

    Left-click and drag to draw a bounding box: button-down records the
    anchor corner, mouse-move previews the rubber-band rectangle on a copy
    of the frame, and button-up commits the box to `manual_boxes` and draws
    it permanently onto the shared `frame`.

    Relies on module globals: `frame` (current image), `manual_boxes`,
    `auto_boxes`, plus `drawing`/`ix`/`iy` initialized in main().
    """
    global manual_boxes, frame, ix, iy, drawing
    if event == cv2.EVENT_LBUTTONDOWN:
        # Start a drag: remember the anchor corner.
        drawing = True
        ix, iy = x, y
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            # Preview on a copy so the underlying frame stays unmodified
            # until the drag is committed.
            temp = frame.copy()
            cv2.rectangle(temp, (ix, iy), (x, y), (0, 255, 0), 2)
            # Also draw existing boxes
            temp = draw_boxes(temp, manual_boxes, auto_boxes)
            cv2.imshow("Dataset Collection", temp)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        # Draw final rectangle on the frame
        cv2.rectangle(frame, (ix, iy), (x, y), (0, 255, 0), 2)
        manual_boxes.append((ix, iy, x, y))
        cv2.imshow("Dataset Collection", draw_boxes(frame.copy(), manual_boxes, auto_boxes))
def auto_detect(img):
    """Find candidate can regions via edge detection and contour analysis.

    Returns a list of (x1, y1, x2, y2) bounding boxes for contours that are
    large enough and have a moderate (roughly cylindrical) aspect ratio.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edged = cv2.Canny(cv2.GaussianBlur(gray, (5, 5), 0), 50, 150)
    contours, _ = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    candidates = []
    for contour in contours:
        # Filter out contours too small to be a can.
        if cv2.contourArea(contour) < 500:
            continue
        x, y, w, h = cv2.boundingRect(contour)
        # Cans are roughly cylindrical, so expect a moderate aspect ratio.
        if 0.5 < w / float(h) < 2.0:
            candidates.append((x, y, x + w, y + h))
    return candidates
def save_annotation(image_path, boxes):
    """Save bounding boxes for an image in YOLO format.

    Each output line is 'class x_center y_center width height' with all
    coordinates normalized by the image size; class index 0 ("can") is
    assumed.  The label path mirrors the image path with 'images' ->
    'labels' and '.jpg' -> '.txt'.

    Robustness fixes over the original: boxes are clamped to the image
    bounds so normalized values stay in [0, 1] (a mouse drag can end
    outside the frame), and an unreadable image aborts with a warning
    instead of crashing on image.shape.
    """
    image = cv2.imread(image_path)
    if image is None:
        print(f"Warning: could not read {image_path}; annotation not saved.")
        return
    h, w, _ = image.shape
    label_path = image_path.replace("images", "labels").replace(".jpg", ".txt")
    with open(label_path, 'w') as f:
        for x1, y1, x2, y2 in boxes:
            # Order the corners, then clamp to the image bounds.
            xmin = max(0, min(x1, x2))
            ymin = max(0, min(y1, y2))
            xmax = min(w, max(x1, x2))
            ymax = min(h, max(y1, y2))
            x_center = ((xmin + xmax) / 2) / w
            y_center = ((ymin + ymax) / 2) / h
            box_width = (xmax - xmin) / w
            box_height = (ymax - ymin) / h
            # Class index 0 assumed for "can"
            f.write(f"0 {x_center} {y_center} {box_width} {box_height}\n")
def get_next_img_count():
    """Return the next free image index under dataset/images.

    Scans for files named img_<number>.jpg and returns max+1; returns 0
    when the directory is missing or contains no parseable names.
    """
    images_dir = "dataset/images"
    if not os.path.exists(images_dir):
        return 0
    counts = []
    for name in os.listdir(images_dir):
        if not (name.startswith("img_") and name.endswith(".jpg")):
            continue
        try:
            counts.append(int(name.split("_")[1].split(".")[0]))
        except ValueError:
            # Skip files whose numeric part does not parse.
            pass
    return max(counts) + 1 if counts else 0
def main():
    """Interactive dataset-collection loop for YOLO training images.

    Shows the live (undistorted) camera feed; the user draws boxes with the
    mouse (see draw_rectangle) and uses keys:
      'c' - save the current frame plus its manual+auto boxes as a
            YOLO-format sample, then reset the boxes,
      'r' - discard the current boxes,
      'a' - run auto_detect on the current frame,
      'q' - quit.

    Mutates module globals (frame, manual_boxes, auto_boxes, drawing, ix, iy)
    shared with the mouse callback.
    """
    global frame, manual_boxes, auto_boxes, drawing, ix, iy
    # Initial mouse-drag state consumed by draw_rectangle.
    drawing = False
    ix, iy = -1, -1
    # Create directories for saving images and labels
    os.makedirs("dataset/images", exist_ok=True)
    os.makedirs("dataset/labels", exist_ok=True)
    # Resume image count from existing files
    img_count = get_next_img_count()
    # Load camera intrinsics and distortion coefficients from YAML file
    fs = cv2.FileStorage("camera_intrinsics.yaml", cv2.FILE_STORAGE_READ)
    if not fs.isOpened():
        print("Error: Cannot open camera_intrinsics.yaml")
        return
    cameraMatrix = fs.getNode("cameraMatrix").mat()
    distCoeffs = fs.getNode("distCoeffs").mat()
    fs.release()
    # Open video capture (change camera ID if necessary)
    cap = cv2.VideoCapture(1)
    if not cap.isOpened():
        print("Error: Cannot open camera")
        return
    cv2.namedWindow("Dataset Collection")
    cv2.setMouseCallback("Dataset Collection", draw_rectangle)
    while True:
        ret, frame = cap.read()
        if not ret:
            print("Error: Failed to capture frame")
            break
        # Undistort the captured frame using the camera intrinsics
        frame = cv2.undistort(frame, cameraMatrix, distCoeffs)
        # Overlay existing boxes on a copy of the frame so the saved image
        # stays annotation-free.
        display_frame = frame.copy()
        display_frame = draw_boxes(display_frame, manual_boxes, auto_boxes)
        cv2.putText(display_frame, "Keys: 'c' - capture, 'r' - reset, 'a' - auto-detect, 'q' - quit",
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)
        cv2.imshow("Dataset Collection", display_frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('c'):
            # Save image and annotations (combining manual and auto-detected boxes)
            combined_boxes = manual_boxes + auto_boxes
            img_path = f"dataset/images/img_{img_count:04d}.jpg"
            cv2.imwrite(img_path, frame)
            save_annotation(img_path, combined_boxes)
            print(f"Saved {img_path} with {len(combined_boxes)} bounding box(es).")
            # Reset boxes for next capture
            manual_boxes = []
            auto_boxes = []
            img_count += 1
        elif key == ord('r'):
            # Reset current bounding boxes
            manual_boxes = []
            auto_boxes = []
            print("Bounding boxes reset for the current frame.")
        elif key == ord('a'):
            # Run auto-detection on the current frame and store results
            auto_boxes = auto_detect(frame)
            print(f"Auto detection found {len(auto_boxes)} candidate bounding box(es).")
            # Update display with auto-detected boxes
            cv2.imshow("Dataset Collection", draw_boxes(frame.copy(), manual_boxes, auto_boxes))
        elif key == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()

11
data.yaml Normal file
View File

@ -0,0 +1,11 @@
# data.yaml
# Paths to your train and val sets
train: dataset/images/train
val: dataset/images/val
# Number of classes
nc: 1
# Class names
names: ["can"]

Binary file not shown.

After

Width:  |  Height:  |  Size: 85 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 77 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 77 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 74 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 71 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 77 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 73 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 72 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 66 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 74 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 75 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 75 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 74 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 69 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 117 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 88 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 89 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 90 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 96 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 89 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 96 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 98 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 98 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 101 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 103 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 105 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 105 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 97 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 105 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 108 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 96 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 107 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 110 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 111 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 110 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 109 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 112 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 107 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 109 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 109 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 101 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 102 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 112 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 110 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 89 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 90 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 93 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 84 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 92 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 88 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 89 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 78 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 90 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 99 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 82 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 81 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 97 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 99 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 111 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 113 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 100 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 97 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 121 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 88 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 84 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 82 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 77 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 74 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 73 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 82 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 84 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 80 KiB

Some files were not shown because too many files have changed in this diff Show More